2024-11-27 03:31:59,633 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-27 03:31:59,644 main DEBUG Took 0.009668 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-27 03:31:59,645 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-27 03:31:59,646 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-27 03:31:59,647 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-27 03:31:59,648 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,655 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-27 03:31:59,667 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,669 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,669 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,670 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,671 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,671 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,672 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,673 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,674 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,675 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,676 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,677 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,678 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,679 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,680 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,681 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,681 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-27 03:31:59,681 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,681 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-27 03:31:59,683 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-27 03:31:59,684 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-27 03:31:59,686 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-27 03:31:59,686 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-27 03:31:59,687 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-27 03:31:59,688 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-27 03:31:59,695 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-27 03:31:59,697 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-27 03:31:59,699 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-27 03:31:59,700 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-27 03:31:59,700 main DEBUG createAppenders(={Console})
2024-11-27 03:31:59,701 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-11-27 03:31:59,701 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-11-27 03:31:59,701 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-11-27 03:31:59,702 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-27 03:31:59,702 main DEBUG OutputStream closed
2024-11-27 03:31:59,702 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-27 03:31:59,702 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-27 03:31:59,703 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-11-27 03:31:59,770 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-27 03:31:59,772 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-27 03:31:59,773 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-27 03:31:59,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-27 03:31:59,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-27 03:31:59,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-27 03:31:59,775 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-27 03:31:59,775 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-27 03:31:59,775 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-27 03:31:59,776 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-27 03:31:59,776 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-27 03:31:59,776 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-27 03:31:59,776 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-27 03:31:59,777 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-27 03:31:59,777 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-27 03:31:59,777 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-27 03:31:59,778 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-27 03:31:59,778 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-27 03:31:59,780 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-27 03:31:59,781 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-11-27 03:31:59,781 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-27 03:31:59,781 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-11-27T03:32:00,006 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf
2024-11-27 03:32:00,008 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-27 03:32:00,009 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
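[Editor's note: the status output above records Log4j2 assembling a PropertiesConfiguration from the log4j2.properties bundled in the tests jar: one "Console" appender writing to SYSTEM_ERR through a PatternLayout, a set of per-package logger levels, and a root logger at "INFO,Console". Below is a minimal sketch of an equivalent configuration built programmatically with Log4j2's ConfigurationBuilder API; the real test uses the properties file (and HBase's own HBaseTestAppender plugin rather than the stock Console appender), so this is illustrative only, with logger names and levels copied from the entries above.]

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class TestLoggingConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();
    // "Console" appender -> SYSTEM_ERR, with the pattern from PatternLayout$Builder above.
    builder.add(builder.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern",
                "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));
    // A few of the per-package levels from the LoggerConfig$Builder entries above.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));
    // Root logger "INFO,Console".
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));
    Configurator.initialize(builder.build());
  }
}
```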
2024-11-27T03:32:00,017 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-11-27T03:32:00,050 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=292, ProcessCount=11, AvailableMemoryMB=9008
2024-11-27T03:32:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-27T03:32:00,069 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8, deleteOnExit=true
2024-11-27T03:32:00,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-27T03:32:00,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/test.cache.data in system properties and HBase conf
2024-11-27T03:32:00,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.tmp.dir in system properties and HBase conf
2024-11-27T03:32:00,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir in system properties and HBase conf
2024-11-27T03:32:00,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-27T03:32:00,072 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-27T03:32:00,073 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-27T03:32:00,152 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-27T03:32:00,238 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-27T03:32:00,242 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-27T03:32:00,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-27T03:32:00,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-27T03:32:00,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-27T03:32:00,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-27T03:32:00,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-27T03:32:00,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-27T03:32:00,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-27T03:32:00,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-27T03:32:00,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/nfs.dump.dir in system properties and HBase conf
2024-11-27T03:32:00,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/java.io.tmpdir in system properties and HBase conf
2024-11-27T03:32:00,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-27T03:32:00,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-27T03:32:00,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-27T03:32:00,718 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-27T03:32:01,317 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-27T03:32:01,390 INFO [Time-limited test {}] log.Log(170): Logging initialized @2433ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-27T03:32:01,467 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:32:01,533 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:32:01,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:32:01,555 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:32:01,556 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:32:01,569 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:32:01,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:32:01,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:32:01,752 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/java.io.tmpdir/jetty-localhost-40129-hadoop-hdfs-3_4_1-tests_jar-_-any-18318687632660142322/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T03:32:01,761 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40129}
2024-11-27T03:32:01,762 INFO [Time-limited test {}] server.Server(415): Started @2806ms
2024-11-27T03:32:01,797 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-27T03:32:02,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:32:02,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:32:02,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:32:02,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:32:02,322 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:32:02,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:32:02,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:32:02,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/java.io.tmpdir/jetty-localhost-46143-hadoop-hdfs-3_4_1-tests_jar-_-any-4400321750264380803/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:32:02,425 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:46143}
2024-11-27T03:32:02,426 INFO [Time-limited test {}] server.Server(415): Started @3470ms
2024-11-27T03:32:02,479 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:32:02,581 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:32:02,588 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:32:02,590 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:32:02,590 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:32:02,590 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-27T03:32:02,592 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:32:02,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:32:02,729 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/java.io.tmpdir/jetty-localhost-43729-hadoop-hdfs-3_4_1-tests_jar-_-any-16558407888104881157/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:32:02,730 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:43729}
2024-11-27T03:32:02,730 INFO [Time-limited test {}] server.Server(415): Started @3774ms
2024-11-27T03:32:02,732 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:32:03,858 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data2/current/BP-1803410911-172.17.0.3-1732678320802/current, will proceed with Du for space computation calculation,
2024-11-27T03:32:03,858 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data1/current/BP-1803410911-172.17.0.3-1732678320802/current, will proceed with Du for space computation calculation,
2024-11-27T03:32:03,890 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:32:03,914 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data4/current/BP-1803410911-172.17.0.3-1732678320802/current, will proceed with Du for space computation calculation,
2024-11-27T03:32:03,914 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data3/current/BP-1803410911-172.17.0.3-1732678320802/current, will proceed with Du for space computation calculation,
2024-11-27T03:32:03,937 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:32:03,941 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2626999d00112df8 with lease ID 0x5d435cb3f2cea0fe: Processing first storage report for DS-14b6d21e-21f4-44be-a2c9-4f681f40a014 from datanode DatanodeRegistration(127.0.0.1:36337, datanodeUuid=8da65dfb-4ab0-4cb2-a6d4-57e861d2aa13, infoPort=37045, infoSecurePort=0, ipcPort=39037, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802)
2024-11-27T03:32:03,942 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2626999d00112df8 with lease ID 0x5d435cb3f2cea0fe: from storage DS-14b6d21e-21f4-44be-a2c9-4f681f40a014 node DatanodeRegistration(127.0.0.1:36337, datanodeUuid=8da65dfb-4ab0-4cb2-a6d4-57e861d2aa13, infoPort=37045, infoSecurePort=0, ipcPort=39037, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-27T03:32:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2626999d00112df8 with lease ID 0x5d435cb3f2cea0fe: Processing first storage report for DS-05c4b145-e1c3-410e-87b5-b7f19a6ff695 from datanode DatanodeRegistration(127.0.0.1:36337, datanodeUuid=8da65dfb-4ab0-4cb2-a6d4-57e861d2aa13, infoPort=37045, infoSecurePort=0, ipcPort=39037, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802)
2024-11-27T03:32:03,943 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2626999d00112df8 with lease ID 0x5d435cb3f2cea0fe: from storage DS-05c4b145-e1c3-410e-87b5-b7f19a6ff695 node DatanodeRegistration(127.0.0.1:36337, datanodeUuid=8da65dfb-4ab0-4cb2-a6d4-57e861d2aa13, infoPort=37045, infoSecurePort=0, ipcPort=39037, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-27T03:32:03,944 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x651619ad097bccff with lease ID 0x5d435cb3f2cea0ff: Processing first storage report for DS-d29c6a91-133e-4dc1-82be-295986de0303 from datanode DatanodeRegistration(127.0.0.1:39445, datanodeUuid=d88a7120-d1af-4a1f-9828-3e5c578e8aae, infoPort=36427, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802)
2024-11-27T03:32:03,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x651619ad097bccff with lease ID 0x5d435cb3f2cea0ff: from storage DS-d29c6a91-133e-4dc1-82be-295986de0303 node DatanodeRegistration(127.0.0.1:39445, datanodeUuid=d88a7120-d1af-4a1f-9828-3e5c578e8aae, infoPort=36427, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:32:03,945 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x651619ad097bccff with lease ID 0x5d435cb3f2cea0ff: Processing first storage report for DS-d21a166d-8e65-42df-8370-d25d98f35555 from datanode DatanodeRegistration(127.0.0.1:39445, datanodeUuid=d88a7120-d1af-4a1f-9828-3e5c578e8aae, infoPort=36427, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802)
2024-11-27T03:32:03,945 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x651619ad097bccff with lease ID 0x5d435cb3f2cea0ff: from storage DS-d21a166d-8e65-42df-8370-d25d98f35555 node DatanodeRegistration(127.0.0.1:39445, datanodeUuid=d88a7120-d1af-4a1f-9828-3e5c578e8aae, infoPort=36427, infoSecurePort=0, ipcPort=34957, storageInfo=lv=-57;cid=testClusterID;nsid=482849681;c=1732678320802), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:32:04,053 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf
2024-11-27T03:32:04,118 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/zookeeper_0, clientPort=60515, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-27T03:32:04,127 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60515
2024-11-27T03:32:04,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:32:04,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:32:04,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:32:04,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:32:04,742 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba with version=8
2024-11-27T03:32:04,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging
2024-11-27T03:32:04,819 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-27T03:32:05,054 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:32:05,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:32:05,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:32:05,067 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:32:05,067 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:32:05,067 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:32:05,183 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-27T03:32:05,233 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-27T03:32:05,242 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-27T03:32:05,245 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:32:05,266 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 85369 (auto-detected)
2024-11-27T03:32:05,267 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected)
2024-11-27T03:32:05,284 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33871
2024-11-27T03:32:05,303 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33871 connecting to ZooKeeper ensemble=127.0.0.1:60515
2024-11-27T03:32:05,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:338710x0, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:32:05,421 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33871-0x1017a590b310000 connected
2024-11-27T03:32:05,514 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
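[Editor's note: the sequence above — DFS with two datanodes, one ZooKeeper server, then the master's RPC server — matches the StartMiniClusterOption printed at 03:32:00,053. A minimal sketch of how a test boots such a minicluster, assuming the HBase 3.x test utility API named in the log (HBaseTestingUtil, StartMiniClusterOption); the wrapper class name is hypothetical and real tests drive this from JUnit rather than main().]

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1, createRootDir=false, createWALDir=false}.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option); // starts DFS, ZooKeeper, master and region server
    try {
      // test body (e.g. the WAL-rolling assertions of testSlowSyncLogRolling) goes here
    } finally {
      util.shutdownMiniCluster(); // tears down the cluster and the temp data dirs
    }
  }
}
```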
2024-11-27T03:32:05,517 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:32:05,525 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:32:05,528 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba, hbase.cluster.distributed=false
2024-11-27T03:32:05,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:32:05,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33871
2024-11-27T03:32:05,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33871
2024-11-27T03:32:05,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33871
2024-11-27T03:32:05,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33871
2024-11-27T03:32:05,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33871
2024-11-27T03:32:05,656 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:32:05,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:32:05,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:32:05,659 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:32:05,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:32:05,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:32:05,662 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-27T03:32:05,664 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:32:05,665 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35649
2024-11-27T03:32:05,667 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35649 connecting to ZooKeeper ensemble=127.0.0.1:60515
2024-11-27T03:32:05,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:32:05,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:32:05,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:356490x0, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:32:05,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:356490x0, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:32:05,690 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35649-0x1017a590b310001 connected
2024-11-27T03:32:05,694 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-27T03:32:05,702 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-27T03:32:05,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-27T03:32:05,709 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:32:05,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35649
2024-11-27T03:32:05,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35649
2024-11-27T03:32:05,714 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35649
2024-11-27T03:32:05,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35649
2024-11-27T03:32:05,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35649
2024-11-27T03:32:05,733 DEBUG [M:0;66d3634da959:33871 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:33871
2024-11-27T03:32:05,734 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,33871,1732678324907
2024-11-27T03:32:05,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:32:05,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:32:05,755 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,33871,1732678324907
2024-11-27T03:32:05,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-27T03:32:05,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:05,785 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-27T03:32:05,786 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,33871,1732678324907 from backup master directory
2024-11-27T03:32:05,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,33871,1732678324907
2024-11-27T03:32:05,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:32:05,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:32:05,795 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
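[Editor's note: the repeated "Set watcher on znode that does not yet exist" and "Received ZooKeeper Event" lines come from HBase's ZKWatcher/ZKUtil layered over the stock ZooKeeper client. A minimal sketch of the underlying mechanism with the plain org.apache.zookeeper API (not HBase's internal classes; the ensemble address reuses the test's randomly chosen client port from the log above):]

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    // 30s session timeout; the lambda is the session's default watcher.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60515", 30000,
        (WatchedEvent event) -> System.out.println("Received ZooKeeper Event, type="
            + event.getType() + ", state=" + event.getState() + ", path=" + event.getPath()));
    // exists() registers a watch even when the znode is absent, so the client is
    // notified by a NodeCreated event once /hbase/running is eventually written.
    zk.exists("/hbase/running", true);
  }
}
```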
2024-11-27T03:32:05,795 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,33871,1732678324907
2024-11-27T03:32:05,797 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-27T03:32:05,798 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-27T03:32:05,860 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase.id] with ID: b06170ab-3d3e-481f-8d23-3a56b9affa5a
2024-11-27T03:32:05,860 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/.tmp/hbase.id
2024-11-27T03:32:05,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:32:05,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:32:05,874 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/.tmp/hbase.id]:[hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase.id]
2024-11-27T03:32:05,919 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:32:05,925 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-27T03:32:05,944 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms.
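[Editor's note: FSUtils publishes hbase.id by writing the ID to a .tmp path and then renaming it into place, so concurrent readers never observe a half-written file. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API; the paths are illustrative and this is not HBase's FSUtils code itself.]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/hbase/.tmp/hbase.id"); // hypothetical staging path
    Path target = new Path("/hbase/hbase.id");   // hypothetical final path
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF("b06170ab-3d3e-481f-8d23-3a56b9affa5a"); // cluster ID from the log
    }
    // An HDFS rename is atomic within one namespace, so the target either
    // appears fully written or not at all.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("failed to move " + tmp + " to " + target);
    }
  }
}
```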
2024-11-27T03:32:05,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:05,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:05,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:32:05,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:32:05,984 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:32:05,985 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-27T03:32:05,991 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:32:06,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:32:06,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:32:06,037 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store
2024-11-27T03:32:06,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:32:06,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:32:06,063 INFO [master/66d3634da959:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-27T03:32:06,066 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:32:06,067 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:32:06,067 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:32:06,068 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:32:06,069 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:32:06,069 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:32:06,069 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
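[Editor's note: the 'master:store' descriptor dumped above (an in-memory 'info' family with ROW_INDEX_V1 encoding and a ROWCOL bloom filter, plus 'proc', 'rs' and 'state' families) can be expressed with HBase's public descriptor builders. A minimal sketch for the 'info' family only, assuming the standard client API rather than the master's internal bootstrap code:]

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .build())
        .build();
    System.out.println(td); // prints a descriptor in the same format as the log above
  }
}
```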
2024-11-27T03:32:06,070 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678326067Disabling compacts and flushes for region at 1732678326067Disabling writes for close at 1732678326069 (+2 ms)Writing region close event to WAL at 1732678326069Closed at 1732678326069
2024-11-27T03:32:06,072 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/.initializing
2024-11-27T03:32:06,072 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/WALs/66d3634da959,33871,1732678324907
2024-11-27T03:32:06,092 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C33871%2C1732678324907, suffix=, logDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/WALs/66d3634da959,33871,1732678324907, archiveDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/oldWALs, maxLogs=10
2024-11-27T03:32:06,100 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C33871%2C1732678324907.1732678326096
2024-11-27T03:32:06,117 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/WALs/66d3634da959,33871,1732678324907/66d3634da959%2C33871%2C1732678324907.1732678326096
2024-11-27T03:32:06,129 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36427:36427),(127.0.0.1/127.0.0.1:37045:37045)]
2024-11-27T03:32:06,131 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:32:06,131 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:32:06,134 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,135 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-27T03:32:06,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:32:06,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-27T03:32:06,204 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:32:06,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:32:06,208 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:32:06,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:32:06,213 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,215 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:32:06,215 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,219 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,220 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,226 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,226 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,230 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:32:06,234 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:32:06,238 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:32:06,239 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725765, jitterRate=-0.07714293897151947}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:32:06,247 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678326146Initializing all the Stores at 1732678326149 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678326149Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678326150 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678326150Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678326150Cleaning up temporary data from old regions at 1732678326226 (+76 ms)Region opened successfully at 1732678326247 (+21 ms)
2024-11-27T03:32:06,249 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-27T03:32:06,283 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b64aa38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:32:06,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:32:06,319 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:32:06,320 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:32:06,322 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:32:06,324 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec
2024-11-27T03:32:06,328 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec
2024-11-27T03:32:06,328 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:32:06,351 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:32:06,361 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:32:06,404 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:32:06,409 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:32:06,411 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:32:06,415 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:32:06,416 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:32:06,420 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:32:06,425 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:32:06,427 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:32:06,436 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:32:06,454 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:32:06,467 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:32:06,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:32:06,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:32:06,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:06,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:06,483 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,33871,1732678324907, sessionid=0x1017a590b310000, setting cluster-up flag (Was=false)
2024-11-27T03:32:06,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:06,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:06,541 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:32:06,544 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,33871,1732678324907
2024-11-27T03:32:06,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:06,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:06,594 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:32:06,599 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,33871,1732678324907
2024-11-27T03:32:06,606 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-27T03:32:06,623 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(746): ClusterId : b06170ab-3d3e-481f-8d23-3a56b9affa5a
2024-11-27T03:32:06,625 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-27T03:32:06,638 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-27T03:32:06,638 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-27T03:32:06,648 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-27T03:32:06,649 DEBUG [RS:0;66d3634da959:35649 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8051a25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:32:06,667 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:35649
2024-11-27T03:32:06,670 INFO [RS:0;66d3634da959:35649 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-27T03:32:06,671 INFO [RS:0;66d3634da959:35649 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-27T03:32:06,671 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-27T03:32:06,673 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,33871,1732678324907 with port=35649, startcode=1732678325623
2024-11-27T03:32:06,675 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-27T03:32:06,683 DEBUG [RS:0;66d3634da959:35649 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-27T03:32:06,683 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-27T03:32:06,689 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-27T03:32:06,694 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,33871,1732678324907 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-27T03:32:06,701 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:32:06,701 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:32:06,701 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:32:06,702 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:32:06,702 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10
2024-11-27T03:32:06,702 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,702 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:32:06,702 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,709 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:32:06,709 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678356709
2024-11-27T03:32:06,709 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-27T03:32:06,710 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-27T03:32:06,712 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-27T03:32:06,714 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,715 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-27T03:32:06,716 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-27T03:32:06,716 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-27T03:32:06,717 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-27T03:32:06,717 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-27T03:32:06,720 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,728 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-27T03:32:06,730 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-27T03:32:06,730 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-27T03:32:06,732 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-27T03:32:06,733 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-27T03:32:06,737 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678326734,5,FailOnTimeoutGroup]
2024-11-27T03:32:06,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:32:06,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:32:06,742 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-27T03:32:06,743 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba
2024-11-27T03:32:06,746 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678326738,5,FailOnTimeoutGroup]
2024-11-27T03:32:06,746 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,746 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-27T03:32:06,747 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,748 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,755 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41301, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-27T03:32:06,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:32:06,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:32:06,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:32:06,764 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33871 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,35649,1732678325623
2024-11-27T03:32:06,767 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33871 {}] master.ServerManager(517): Registering regionserver=66d3634da959,35649,1732678325623
2024-11-27T03:32:06,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:32:06,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:32:06,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:32:06,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:32:06,777 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:32:06,777 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:32:06,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:32:06,782 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba
2024-11-27T03:32:06,782 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:32:06,782 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45395
2024-11-27T03:32:06,782 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-27T03:32:06,782 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:32:06,784 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:32:06,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:32:06,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:32:06,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:32:06,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:32:06,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740
2024-11-27T03:32:06,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740
2024-11-27T03:32:06,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:32:06,792 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:32:06,793 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:32:06,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:32:06,794 DEBUG [RS:0;66d3634da959:35649 {}] zookeeper.ZKUtil(111): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,35649,1732678325623
2024-11-27T03:32:06,795 WARN [RS:0;66d3634da959:35649 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:32:06,795 INFO [RS:0;66d3634da959:35649 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:32:06,796 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623
2024-11-27T03:32:06,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:32:06,798 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,35649,1732678325623]
2024-11-27T03:32:06,801 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:32:06,802 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691102, jitterRate=-0.12121935188770294}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:32:06,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678326764Initializing all the Stores at 1732678326766 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678326766Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678326769 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678326769Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678326769Cleaning up temporary data from old regions at 1732678326793 (+24 ms)Region opened successfully at 1732678326806 (+13 ms)
2024-11-27T03:32:06,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:32:06,806 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:32:06,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:32:06,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:32:06,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:32:06,808 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:32:06,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678326806Disabling compacts and flushes for region at 1732678326806Disabling writes for close at 1732678326807 (+1 ms)Writing region close event to WAL at 1732678326808 (+1 ms)Closed at 1732678326808
2024-11-27T03:32:06,811 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:32:06,812 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-27T03:32:06,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-27T03:32:06,826 INFO [RS:0;66d3634da959:35649 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-27T03:32:06,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:32:06,829 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-27T03:32:06,842 INFO [RS:0;66d3634da959:35649 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-27T03:32:06,846 INFO [RS:0;66d3634da959:35649 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:32:06,847 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,847 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-27T03:32:06,853 INFO [RS:0;66d3634da959:35649 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-27T03:32:06,854 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,854 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,854 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,854 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,854 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,855 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:32:06,856 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:32:06,856 DEBUG [RS:0;66d3634da959:35649 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:32:06,857 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,857 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,857 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,857 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,857 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,857 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,35649,1732678325623-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:32:06,873 INFO [RS:0;66d3634da959:35649 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-27T03:32:06,875 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,35649,1732678325623-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,875 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,875 INFO [RS:0;66d3634da959:35649 {}] regionserver.Replication(171): 66d3634da959,35649,1732678325623 started
2024-11-27T03:32:06,891 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:32:06,891 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,35649,1732678325623, RpcServer on 66d3634da959/172.17.0.3:35649, sessionid=0x1017a590b310001
2024-11-27T03:32:06,892 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-27T03:32:06,892 DEBUG [RS:0;66d3634da959:35649 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,35649,1732678325623
2024-11-27T03:32:06,892 DEBUG [RS:0;66d3634da959:35649 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,35649,1732678325623'
2024-11-27T03:32:06,892 DEBUG [RS:0;66d3634da959:35649 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-27T03:32:06,893 DEBUG [RS:0;66d3634da959:35649 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-27T03:32:06,894 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-27T03:32:06,894 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-27T03:32:06,894 DEBUG [RS:0;66d3634da959:35649 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,35649,1732678325623
2024-11-27T03:32:06,894 DEBUG [RS:0;66d3634da959:35649 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,35649,1732678325623'
2024-11-27T03:32:06,894 DEBUG [RS:0;66d3634da959:35649 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-27T03:32:06,895 DEBUG [RS:0;66d3634da959:35649 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-27T03:32:06,895 DEBUG [RS:0;66d3634da959:35649 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-27T03:32:06,895 INFO [RS:0;66d3634da959:35649 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-27T03:32:06,896 INFO [RS:0;66d3634da959:35649 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-27T03:32:06,980 WARN [66d3634da959:33871 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-27T03:32:07,007 INFO [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C35649%2C1732678325623, suffix=, logDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623, archiveDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs, maxLogs=32
2024-11-27T03:32:07,010 INFO [RS:0;66d3634da959:35649 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678327009
2024-11-27T03:32:07,020 INFO [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678327009
2024-11-27T03:32:07,024 DEBUG [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36427:36427),(127.0.0.1/127.0.0.1:37045:37045)]
2024-11-27T03:32:07,237 DEBUG [66d3634da959:33871 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-27T03:32:07,252 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,35649,1732678325623
2024-11-27T03:32:07,259 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,35649,1732678325623, state=OPENING
2024-11-27T03:32:07,309 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-27T03:32:07,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:07,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:32:07,322 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:32:07,322 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:32:07,325 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:32:07,328 INFO
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,35649,1732678325623}] 2024-11-27T03:32:07,505 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-27T03:32:07,508 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56687, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-27T03:32:07,518 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-27T03:32:07,518 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:32:07,522 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C35649%2C1732678325623.meta, suffix=.meta, logDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623, archiveDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs, maxLogs=32 2024-11-27T03:32:07,524 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.meta.1732678327524.meta 2024-11-27T03:32:07,532 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.meta.1732678327524.meta 2024-11-27T03:32:07,535 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36427:36427),(127.0.0.1/127.0.0.1:37045:37045)] 2024-11-27T03:32:07,537 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-27T03:32:07,538 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-27T03:32:07,541 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-27T03:32:07,545 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
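
[Editor's note] The two "WAL configuration" entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, provider FSHLogProvider) correspond to standard HBase settings. A minimal sketch of a configuration that would produce them, assuming the stock key names; the values simply mirror what the log reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "filesystem" selects org.apache.hadoop.hbase.wal.FSHLogProvider,
        // the provider instantiated in the log above.
        conf.set("hbase.wal.provider", "filesystem");
        // blocksize=256 MB in the "WAL configuration" lines.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // rollsize = blocksize * multiplier; 0.5 yields the reported 128 MB.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // maxLogs=32: threshold on live WAL files before rolls are forced.
        conf.setInt("hbase.regionserver.maxlogs", 32);
      }
    }
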
2024-11-27T03:32:07,548 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-27T03:32:07,549 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:32:07,549 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-27T03:32:07,549 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-27T03:32:07,552 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T03:32:07,554 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T03:32:07,554 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:32:07,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:32:07,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-27T03:32:07,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-27T03:32:07,557 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:32:07,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:32:07,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T03:32:07,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T03:32:07,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:32:07,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:32:07,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T03:32:07,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T03:32:07,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:32:07,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
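
[Editor's note] The CompactionConfiguration(183) lines repeated for each meta column family (info, ns, rep_barrier, table) restate the default compaction tuning. A sketch of the matching knobs, assuming the standard key names; the values are the defaults the log reports:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);   // major period (7 days, in ms)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter 0.500000
      }
    }
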
2024-11-27T03:32:07,563 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-27T03:32:07,564 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740 2024-11-27T03:32:07,567 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740 2024-11-27T03:32:07,569 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-27T03:32:07,569 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-27T03:32:07,570 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T03:32:07,573 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-27T03:32:07,575 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783494, jitterRate=-0.0037360042333602905}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T03:32:07,575 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-27T03:32:07,577 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732678327550Writing region info on filesystem at 1732678327550Initializing all the Stores at 1732678327552 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678327552Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678327552Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678327552Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678327552Cleaning up temporary data from old regions at 1732678327569 (+17 ms)Running coprocessor post-open hooks at 1732678327575 (+6 ms)Region opened successfully at 1732678327576 (+1 ms) 2024-11-27T03:32:07,583 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678327497 2024-11-27T03:32:07,595 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-27T03:32:07,596 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-27T03:32:07,597 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,35649,1732678325623 2024-11-27T03:32:07,600 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,35649,1732678325623, state=OPEN 2024-11-27T03:32:07,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T03:32:07,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T03:32:07,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:32:07,636 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:32:07,636 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,35649,1732678325623 2024-11-27T03:32:07,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-27T03:32:07,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,35649,1732678325623 in 309 msec 2024-11-27T03:32:07,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-27T03:32:07,650 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 827 msec 2024-11-27T03:32:07,652 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-27T03:32:07,652 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-27T03:32:07,672 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-27T03:32:07,673 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,35649,1732678325623, seqNum=-1] 2024-11-27T03:32:07,691 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T03:32:07,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56341, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T03:32:07,714 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0760 sec 2024-11-27T03:32:07,714 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678327714, completionTime=-1 2024-11-27T03:32:07,717 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-27T03:32:07,717 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-27T03:32:07,741 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-27T03:32:07,741 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678387741 2024-11-27T03:32:07,741 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678447741 2024-11-27T03:32:07,741 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 24 msec 2024-11-27T03:32:07,744 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33871,1732678324907-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:32:07,745 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33871,1732678324907-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:32:07,745 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33871,1732678324907-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:32:07,746 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:33871, period=300000, unit=MILLISECONDS is enabled. 
2024-11-27T03:32:07,746 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-27T03:32:07,747 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-27T03:32:07,753 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-27T03:32:07,776 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.981sec 2024-11-27T03:32:07,778 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-27T03:32:07,779 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-27T03:32:07,780 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-27T03:32:07,780 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-27T03:32:07,780 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-27T03:32:07,781 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33871,1732678324907-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T03:32:07,782 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33871,1732678324907-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-27T03:32:07,790 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-27T03:32:07,791 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-27T03:32:07,791 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33871,1732678324907-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
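
[Editor's note] Every "Chore ScheduledChore name=..., period=..., unit=... is enabled." line above is ChoreService accepting a periodic task. A minimal sketch of that pattern; the chore name, period, and the no-op Stoppable here are illustrative, not taken from the log:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {   // trivial stand-in for a real server lifecycle
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
          @Override protected void chore() {
            // periodic work; ChoreService invokes this once per period (ms)
          }
        };
        // scheduleChore emits the "Chore ScheduledChore name=... is enabled." log line.
        new ChoreService("example").scheduleChore(chore);
      }
    }
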
2024-11-27T03:32:07,831 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:32:07,834 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-27T03:32:07,834 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-27T03:32:07,837 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,33871,-1 for getting cluster id 2024-11-27T03:32:07,839 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-27T03:32:07,847 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b06170ab-3d3e-481f-8d23-3a56b9affa5a' 2024-11-27T03:32:07,849 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-27T03:32:07,850 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b06170ab-3d3e-481f-8d23-3a56b9affa5a" 2024-11-27T03:32:07,852 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ce8903c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:32:07,852 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,33871,-1] 2024-11-27T03:32:07,854 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-27T03:32:07,856 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:32:07,857 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41016, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-27T03:32:07,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:32:07,860 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-27T03:32:07,867 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,35649,1732678325623, seqNum=-1] 2024-11-27T03:32:07,868 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T03:32:07,870 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39254, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T03:32:07,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=66d3634da959,33871,1732678324907 2024-11-27T03:32:07,890 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:32:07,898 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-27T03:32:07,902 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-27T03:32:07,906 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 66d3634da959,33871,1732678324907 2024-11-27T03:32:07,909 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3ac11723 2024-11-27T03:32:07,910 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-27T03:32:07,913 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41018, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-27T03:32:07,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-27T03:32:07,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
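
[Editor's note] The two TableDescriptorChecker warnings fire because the test table is created with deliberately tiny limits so the test can force frequent flushes and rolls. A sketch of a descriptor carrying those exact values via the public builder API; the 'info' family mirrors the one created just below:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class DescriptorSketch {
      static TableDescriptor smallTable() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(786432L)      // triggers the MAX_FILESIZE warning above
            .setMemStoreFlushSize(8192L)  // triggers the MEMSTORE_FLUSHSIZE warning above
            .build();
      }
    }
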
2024-11-27T03:32:07,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T03:32:07,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-27T03:32:07,928 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-27T03:32:07,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-27T03:32:07,931 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:32:07,933 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-27T03:32:07,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-27T03:32:07,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741835_1011 (size=389) 2024-11-27T03:32:07,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741835_1011 (size=389) 2024-11-27T03:32:07,979 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 136d10994744c40c090331422234e748, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba 2024-11-27T03:32:07,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741836_1012 (size=72) 2024-11-27T03:32:07,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741836_1012 (size=72) 2024-11-27T03:32:08,393 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:32:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 136d10994744c40c090331422234e748, disabling compactions & flushes 2024-11-27T03:32:08,394 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:32:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:32:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. after waiting 0 ms 2024-11-27T03:32:08,394 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:32:08,394 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:32:08,395 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 136d10994744c40c090331422234e748: Waiting for close lock at 1732678328394Disabling compacts and flushes for region at 1732678328394Disabling writes for close at 1732678328394Writing region close event to WAL at 1732678328394Closed at 1732678328394 2024-11-27T03:32:08,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-27T03:32:08,408 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732678328400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678328400"}]},"ts":"1732678328400"} 2024-11-27T03:32:08,414 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
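
[Editor's note] Client-side, the whole pid=4 CreateTableProcedure above is driven by one Admin call; the client then polls the master until the procedure finishes (visible later as the MasterRpcServices(1377) "is procedure done" checks). A sketch, reusing the hypothetical smallTable() helper from the previous sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's CreateTableProcedure completes,
          // polling procedure state under the hood.
          admin.createTable(DescriptorSketch.smallTable());
        }
      }
    }
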
2024-11-27T03:32:08,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-27T03:32:08,418 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678328416"}]},"ts":"1732678328416"} 2024-11-27T03:32:08,423 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-27T03:32:08,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=136d10994744c40c090331422234e748, ASSIGN}] 2024-11-27T03:32:08,427 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=136d10994744c40c090331422234e748, ASSIGN 2024-11-27T03:32:08,429 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=136d10994744c40c090331422234e748, ASSIGN; state=OFFLINE, location=66d3634da959,35649,1732678325623; forceNewPlan=false, retain=false 2024-11-27T03:32:08,581 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=136d10994744c40c090331422234e748, regionState=OPENING, regionLocation=66d3634da959,35649,1732678325623 2024-11-27T03:32:08,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=136d10994744c40c090331422234e748, ASSIGN because future has completed 2024-11-27T03:32:08,589 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 136d10994744c40c090331422234e748, server=66d3634da959,35649,1732678325623}] 2024-11-27T03:32:08,753 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 
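
[Editor's note] The assignment dispatched above completes on the region server below; once hbase:meta records the region as OPEN, a client can observe the placement through RegionLocator. A short sketch, assuming an already-open Connection as in the previous sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class LocatorSketch {
      static void printAssignments(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // e.g. "136d10994744c40c090331422234e748 -> 66d3634da959,35649,1732678325623"
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
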
2024-11-27T03:32:08,753 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 136d10994744c40c090331422234e748, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.', STARTKEY => '', ENDKEY => ''} 2024-11-27T03:32:08,754 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 136d10994744c40c090331422234e748 2024-11-27T03:32:08,754 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:32:08,754 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,754 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,757 INFO [StoreOpener-136d10994744c40c090331422234e748-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 136d10994744c40c090331422234e748 2024-11-27T03:32:08,760 INFO [StoreOpener-136d10994744c40c090331422234e748-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 136d10994744c40c090331422234e748 columnFamilyName info 2024-11-27T03:32:08,760 DEBUG [StoreOpener-136d10994744c40c090331422234e748-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:32:08,762 INFO [StoreOpener-136d10994744c40c090331422234e748-1 {}] regionserver.HStore(327): Store=136d10994744c40c090331422234e748/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T03:32:08,762 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,763 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748 2024-11-27T03:32:08,764 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748 2024-11-27T03:32:08,765 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,765 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,768 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,771 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T03:32:08,772 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 136d10994744c40c090331422234e748; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809865, jitterRate=0.029797658324241638}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-27T03:32:08,772 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 136d10994744c40c090331422234e748 2024-11-27T03:32:08,773 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 136d10994744c40c090331422234e748: Running coprocessor pre-open hook at 1732678328755Writing region info on filesystem at 1732678328755Initializing all the Stores at 1732678328757 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678328757Cleaning up temporary data from old regions at 1732678328765 (+8 ms)Running coprocessor post-open hooks at 1732678328772 (+7 ms)Region opened successfully at 1732678328773 (+1 ms) 2024-11-27T03:32:08,775 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748., pid=6, masterSystemTime=1732678328744 2024-11-27T03:32:08,779 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:32:08,779 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:32:08,781 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=136d10994744c40c090331422234e748, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,35649,1732678325623 2024-11-27T03:32:08,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 136d10994744c40c090331422234e748, server=66d3634da959,35649,1732678325623 because future has completed 2024-11-27T03:32:08,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-27T03:32:08,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 136d10994744c40c090331422234e748, server=66d3634da959,35649,1732678325623 in 198 msec 2024-11-27T03:32:08,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-27T03:32:08,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=136d10994744c40c090331422234e748, ASSIGN in 367 msec 2024-11-27T03:32:08,799 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T03:32:08,799 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678328799"}]},"ts":"1732678328799"} 2024-11-27T03:32:08,803 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-27T03:32:08,806 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T03:32:08,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 885 msec 2024-11-27T03:32:13,065 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-27T03:32:13,122 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-27T03:32:13,124 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-27T03:32:15,232 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-27T03:32:15,233 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-27T03:32:15,239 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-27T03:32:15,239 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-27T03:32:15,243 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:32:15,243 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-27T03:32:15,244 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-27T03:32:15,244 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-27T03:32:17,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33871 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-27T03:32:17,998 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-27T03:32:18,005 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-27T03:32:18,013 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-27T03:32:18,014 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 
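
[Editor's note] The "Checking to see if procedure is done pid=4" poll and the META scan that follows ("Found 1 regions for table ...") have direct public-API equivalents. A sketch with the same hypothetical admin handle as above:

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class AvailabilitySketch {
      static void waitAndList(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        while (!admin.isTableAvailable(tn)) {   // analogous to the procedure-done polling
          Thread.sleep(100);
        }
        List<RegionInfo> regions = admin.getRegions(tn); // mirrors "Found 1 regions for table ..."
        System.out.println("regions=" + regions.size());
      }
    }
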
2024-11-27T03:32:18,015 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678338015
2024-11-27T03:32:18,023 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:18,023 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:18,023 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:18,023 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:18,023 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:18,024 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678327009 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678338015
2024-11-27T03:32:18,025 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37045:37045),(127.0.0.1/127.0.0.1:36427:36427)]
2024-11-27T03:32:18,025 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678327009 is not closed yet, will try archiving it next time
2024-11-27T03:32:18,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741833_1009 (size=451)
2024-11-27T03:32:18,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741833_1009 (size=451)
2024-11-27T03:32:18,029 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678327009 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678327009
2024-11-27T03:32:18,039 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748., hostname=66d3634da959,35649,1732678325623, seqNum=2]
2024-11-27T03:32:30,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35649 {}] regionserver.HRegion(8855): Flush requested on 136d10994744c40c090331422234e748
2024-11-27T03:32:30,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 136d10994744c40c090331422234e748 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:32:30,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/6707084687794bd98dca2e0b09219b17 is 1080, key is row0001/info:/1732678338042/Put/seqid=0
2024-11-27T03:32:30,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741838_1014 (size=12509)
2024-11-27T03:32:30,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741838_1014 (size=12509)
2024-11-27T03:32:30,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/6707084687794bd98dca2e0b09219b17
2024-11-27T03:32:30,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/6707084687794bd98dca2e0b09219b17 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17
2024-11-27T03:32:30,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17, entries=7, sequenceid=11, filesize=12.2 K
2024-11-27T03:32:30,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 136d10994744c40c090331422234e748 in 128ms, sequenceid=11, compaction requested=false
2024-11-27T03:32:30,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 136d10994744c40c090331422234e748:
2024-11-27T03:32:34,050 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
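The FsDatasetAsyncDiskServiceFixer entry above comes from a test utility that reaches into a Hadoop-internal class via reflection; on Hadoop releases newer than 3.2.3/3.3.4 the private threadGroup field it looks for no longer exists, so the lookup fails and the utility merely logs the exception and moves on. A minimal sketch of that pattern, assuming an illustrative helper class (this is not the HBase code itself):

    import java.lang.reflect.Field;

    // Hedged sketch: read a private field reflectively and degrade gracefully
    // when a newer library version has removed it, as the log entry above shows.
    final class ReflectiveFieldProbe {
        static Object readPrivateField(Object target, String fieldName) {
            try {
                Field f = target.getClass().getDeclaredField(fieldName); // e.g. "threadGroup"
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException | IllegalAccessException e) {
                // Field gone in this version: note the failure and carry on,
                // like the FsDatasetAsyncDiskServiceFixer thread does.
                System.err.println(fieldName + " not found: " + e);
                return null;
            }
        }
    }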
2024-11-27T03:32:38,136 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678358135
2024-11-27T03:32:38,353 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:32:38,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:38,354 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:38,354 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:38,354 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:38,354 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:38,355 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678338015 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678358135
2024-11-27T03:32:38,355 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36427:36427),(127.0.0.1/127.0.0.1:37045:37045)]
2024-11-27T03:32:38,356 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678338015 is not closed yet, will try archiving it next time
2024-11-27T03:32:38,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741837_1013 (size=12399)
2024-11-27T03:32:38,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741837_1013 (size=12399)
2024-11-27T03:32:38,561 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:40,770 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:42,979 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:45,187 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:45,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35649 {}] regionserver.HRegion(8855): Flush requested on 136d10994744c40c090331422234e748
2024-11-27T03:32:45,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 136d10994744c40c090331422234e748 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:32:45,392 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:45,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/4ef7f7abd79448aeb4d4dcf5e78e1255 is 1080, key is row0008/info:/1732678352117/Put/seqid=0
2024-11-27T03:32:45,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741840_1016 (size=12509)
2024-11-27T03:32:45,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741840_1016 (size=12509)
2024-11-27T03:32:45,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/4ef7f7abd79448aeb4d4dcf5e78e1255
2024-11-27T03:32:45,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/4ef7f7abd79448aeb4d4dcf5e78e1255 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/4ef7f7abd79448aeb4d4dcf5e78e1255
2024-11-27T03:32:45,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/4ef7f7abd79448aeb4d4dcf5e78e1255, entries=7, sequenceid=21, filesize=12.2 K
2024-11-27T03:32:45,639 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:45,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 136d10994744c40c090331422234e748 in 452ms, sequenceid=21, compaction requested=false
2024-11-27T03:32:45,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 136d10994744c40c090331422234e748:
2024-11-27T03:32:45,640 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K
2024-11-27T03:32:45,640 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:32:45,641 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17 because midkey is the same as first or last row
2024-11-27T03:32:47,395 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:47,795 INFO [master/66d3634da959:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-27T03:32:47,795 INFO [master/66d3634da959:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-27T03:32:49,605 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:49,611 WARN [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:49,612 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C35649%2C1732678325623:(num 1732678358135) roll requested
2024-11-27T03:32:49,613 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678369613
2024-11-27T03:32:49,826 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:49,827 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:49,827 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:49,827 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:49,827 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:32:49,827 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
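The roll request above fires on the count-based trigger (count=8 exceeds the threshold of 5 slow syncs), while later entries in this log fire on the time-based one (a single sync over 5000 ms). A minimal sketch of those two checks, using the thresholds visible in the log lines; the class, method, and the 100 ms "slow" cutoff are assumptions for illustration, not HBase's actual implementation:

    // Hedged sketch of the two slow-sync roll triggers seen in this log.
    // 5000 ms and count>5 are taken from the log; 100 ms is an assumed cutoff
    // for counting a sync as "slow".
    final class SlowSyncRollPolicy {
        private static final long SLOW_SYNC_MS = 100;      // assumed reporting cutoff
        private static final long ROLL_ON_SYNC_MS = 5000;  // "time=5012 ms, threshold=5000 ms"
        private static final int SLOW_SYNC_ROLL_COUNT = 5; // "count=8, threshold=5"
        private int slowSyncCount;

        /** Returns true when a WAL roll should be requested after a sync completes. */
        boolean onSyncCompleted(long syncMillis) {
            if (syncMillis >= ROLL_ON_SYNC_MS) {
                slowSyncCount = 0;
                return true; // one very slow sync is enough on its own
            }
            if (syncMillis >= SLOW_SYNC_MS && ++slowSyncCount > SLOW_SYNC_ROLL_COUNT) {
                slowSyncCount = 0;
                return true; // too many merely slow syncs accumulated
            }
            return false;
        }
    }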
2024-11-27T03:32:49,827 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678358135 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678369613
2024-11-27T03:32:49,828 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36427:36427),(127.0.0.1/127.0.0.1:37045:37045)]
2024-11-27T03:32:49,828 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678358135 is not closed yet, will try archiving it next time
2024-11-27T03:32:49,829 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678338015 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678338015
2024-11-27T03:32:49,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741839_1015 (size=7739)
2024-11-27T03:32:49,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741839_1015 (size=7739)
2024-11-27T03:32:51,814 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:53,754 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 136d10994744c40c090331422234e748, had cached 0 bytes from a total of 25018
2024-11-27T03:32:54,020 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:56,226 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:32:58,435 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:33:00,439 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-27T03:33:00,439 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678380439
2024-11-27T03:33:04,050 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-27T03:33:05,455 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:33:05,458 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK], DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK]]
2024-11-27T03:33:05,458 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C35649%2C1732678325623:(num 1732678380439) roll requested
2024-11-27T03:33:05,458 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:05,458 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:05,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:05,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:05,459 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:05,460 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678369613 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678380439
2024-11-27T03:33:05,461 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37045:37045),(127.0.0.1/127.0.0.1:36427:36427)]
2024-11-27T03:33:05,461 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678369613 is not closed yet, will try archiving it next time
2024-11-27T03:33:05,461 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678385461
2024-11-27T03:33:05,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741841_1017 (size=4753)
2024-11-27T03:33:05,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741841_1017 (size=4753)
2024-11-27T03:33:10,516 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5052 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:10,516 WARN [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5052 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:10,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35649 {}] regionserver.HRegion(8855): Flush requested on 136d10994744c40c090331422234e748
2024-11-27T03:33:10,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 136d10994744c40c090331422234e748 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:33:10,525 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5061 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:10,525 WARN [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5061 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:12,519 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-27T03:33:15,573 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5052 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:15,573 WARN [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5052 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:15,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:15,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:15,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:15,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:15,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:15,575 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678380439 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678385461
2024-11-27T03:33:15,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741842_1018 (size=1569)
2024-11-27T03:33:15,580 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37045:37045),(127.0.0.1/127.0.0.1:36427:36427)]
2024-11-27T03:33:15,580 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678380439 is not closed yet, will try archiving it next time
2024-11-27T03:33:15,580 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C35649%2C1732678325623:(num 1732678385461) roll requested
2024-11-27T03:33:15,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741842_1018 (size=1569)
2024-11-27T03:33:15,581 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678395580
2024-11-27T03:33:15,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/80552530274947d39a7693ce29626b54 is 1080, key is row0015/info:/1732678367191/Put/seqid=0
2024-11-27T03:33:15,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741844_1020 (size=12509)
2024-11-27T03:33:15,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741844_1020 (size=12509)
2024-11-27T03:33:15,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/80552530274947d39a7693ce29626b54
2024-11-27T03:33:15,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/80552530274947d39a7693ce29626b54 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/80552530274947d39a7693ce29626b54
2024-11-27T03:33:15,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/80552530274947d39a7693ce29626b54, entries=7, sequenceid=31, filesize=12.2 K
2024-11-27T03:33:20,617 INFO [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:20,617 WARN [FSHLog-0-hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba-prefix:66d3634da959,35649,1732678325623 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:20,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 136d10994744c40c090331422234e748 in 10100ms, sequenceid=31, compaction requested=true
2024-11-27T03:33:20,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 136d10994744c40c090331422234e748:
2024-11-27T03:33:20,618 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K
2024-11-27T03:33:20,618 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:33:20,618 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17 because midkey is the same as first or last row
2024-11-27T03:33:20,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 136d10994744c40c090331422234e748:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:33:20,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:33:20,626 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:33:20,630 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:33:20,631 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.HStore(1541): 136d10994744c40c090331422234e748/info is initiating minor compaction (all files)
2024-11-27T03:33:20,631 INFO [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 136d10994744c40c090331422234e748/info in TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.
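The split-policy entries above show the two-stage decision: the size check passes (sumSize=36.6 K exceeds sizeToCheck=16.0 K), but no split happens because the store's midkey equals its first or last row, so there is no point that would divide the keys into two non-empty halves. A hedged sketch of that check; the class and method names are illustrative, and byte[] rows stand in for HBase's cell keys:

    import java.util.Arrays;

    // Hedged sketch of the split decision logged above, not HBase's actual code.
    final class SplitPointCheck {
        /** "Should split because region size is big enough sumSize=..., sizeToCheck=..." */
        static boolean sizeSaysSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
            return sumStoreSizeBytes > sizeToCheckBytes;
        }

        /** Returns null when "midkey is the same as first or last row": no usable split point. */
        static byte[] splitPointOrNull(byte[] firstRow, byte[] midRow, byte[] lastRow) {
            if (Arrays.equals(midRow, firstRow) || Arrays.equals(midRow, lastRow)) {
                return null;
            }
            return midRow;
        }
    }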
2024-11-27T03:33:20,632 INFO [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/4ef7f7abd79448aeb4d4dcf5e78e1255, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/80552530274947d39a7693ce29626b54] into tmpdir=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp, totalSize=36.6 K
2024-11-27T03:33:20,633 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6707084687794bd98dca2e0b09219b17, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732678338042
2024-11-27T03:33:20,633 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ef7f7abd79448aeb4d4dcf5e78e1255, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732678352117
2024-11-27T03:33:20,634 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] compactions.Compactor(225): Compacting 80552530274947d39a7693ce29626b54, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732678367191
2024-11-27T03:33:20,648 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5065 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:20,648 WARN [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5065 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36337,DS-14b6d21e-21f4-44be-a2c9-4f681f40a014,DISK], DatanodeInfoWithStorage[127.0.0.1:39445,DS-d29c6a91-133e-4dc1-82be-295986de0303,DISK]]
2024-11-27T03:33:20,648 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,649 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,649 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,649 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,649 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,649 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678385461 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678395580
2024-11-27T03:33:20,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741843_1019 (size=438)
2024-11-27T03:33:20,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741843_1019 (size=438)
2024-11-27T03:33:20,652 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678358135 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678358135
2024-11-27T03:33:20,654 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678369613 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678369613
2024-11-27T03:33:20,655 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37045:37045),(127.0.0.1/127.0.0.1:36427:36427)]
2024-11-27T03:33:20,655 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C35649%2C1732678325623:(num 1732678400655) roll requested
2024-11-27T03:33:20,655 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678400655
2024-11-27T03:33:20,656 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678380439 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678380439
2024-11-27T03:33:20,658 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678385461 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678385461
2024-11-27T03:33:20,663 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,663 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,663 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,663 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,663 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,663 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678395580 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678400655
2024-11-27T03:33:20,665 INFO [RS:0;66d3634da959:35649-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 136d10994744c40c090331422234e748#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:33:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741845_1021 (size=93)
2024-11-27T03:33:20,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741845_1021 (size=93)
2024-11-27T03:33:20,666 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/1be79d2531b546cd9e931adefb52a4e0 is 1080, key is row0001/info:/1732678338042/Put/seqid=0
2024-11-27T03:33:20,667 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678395580 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs/66d3634da959%2C35649%2C1732678325623.1732678395580
2024-11-27T03:33:20,669 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37045:37045),(127.0.0.1/127.0.0.1:36427:36427)]
2024-11-27T03:33:20,670 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C35649%2C1732678325623.1732678400670
2024-11-27T03:33:20,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741847_1023 (size=27710)
2024-11-27T03:33:20,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741847_1023 (size=27710)
2024-11-27T03:33:20,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:33:20,681 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678400655 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678400670
2024-11-27T03:33:20,682 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37045:37045),(127.0.0.1/127.0.0.1:36427:36427)]
2024-11-27T03:33:20,682 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/WALs/66d3634da959,35649,1732678325623/66d3634da959%2C35649%2C1732678325623.1732678400655 is not closed yet, will try archiving it next time
2024-11-27T03:33:20,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741846_1022 (size=1258)
2024-11-27T03:33:20,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741846_1022 (size=1258)
2024-11-27T03:33:20,688 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/1be79d2531b546cd9e931adefb52a4e0 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/1be79d2531b546cd9e931adefb52a4e0
2024-11-27T03:33:20,705 INFO [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 136d10994744c40c090331422234e748/info of 136d10994744c40c090331422234e748 into 1be79d2531b546cd9e931adefb52a4e0(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:33:20,705 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 136d10994744c40c090331422234e748:
2024-11-27T03:33:20,707 INFO [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748., storeName=136d10994744c40c090331422234e748/info, priority=13, startTime=1732678400621; duration=0sec
2024-11-27T03:33:20,707 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-11-27T03:33:20,707 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:33:20,707 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/1be79d2531b546cd9e931adefb52a4e0 because midkey is the same as first or last row
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/1be79d2531b546cd9e931adefb52a4e0 because midkey is the same as first or last row
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/1be79d2531b546cd9e931adefb52a4e0 because midkey is the same as first or last row
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:33:20,708 DEBUG [RS:0;66d3634da959:35649-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 136d10994744c40c090331422234e748:info
2024-11-27T03:33:32,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35649 {}] regionserver.HRegion(8855): Flush requested on 136d10994744c40c090331422234e748
2024-11-27T03:33:32,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 136d10994744c40c090331422234e748 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:33:32,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/686a919561dc4d4da3a644eee40ef15f is 1080, key is row0022/info:/1732678400671/Put/seqid=0
2024-11-27T03:33:32,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741849_1025 (size=12509)
2024-11-27T03:33:32,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741849_1025 (size=12509)
2024-11-27T03:33:32,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/686a919561dc4d4da3a644eee40ef15f
2024-11-27T03:33:32,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/686a919561dc4d4da3a644eee40ef15f as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/686a919561dc4d4da3a644eee40ef15f
2024-11-27T03:33:32,743 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/686a919561dc4d4da3a644eee40ef15f, entries=7, sequenceid=42, filesize=12.2 K
2024-11-27T03:33:32,744 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 136d10994744c40c090331422234e748 in 37ms, sequenceid=42, compaction requested=false
2024-11-27T03:33:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 136d10994744c40c090331422234e748:
2024-11-27T03:33:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K
2024-11-27T03:33:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:33:32,744 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/1be79d2531b546cd9e931adefb52a4e0 because midkey is the same as first or last row
2024-11-27T03:33:34,051 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-27T03:33:38,755 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 136d10994744c40c090331422234e748, had cached 0 bytes from a total of 40219
2024-11-27T03:33:40,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-27T03:33:40,734 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:33:40,735 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:33:40,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:40,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:40,746 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-27T03:33:40,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-27T03:33:40,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1590533472, stopped=false
2024-11-27T03:33:40,746 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,33871,1732678324907
2024-11-27T03:33:40,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:40,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:40,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:40,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:40,810 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:33:40,811 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
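The ZKWatcher entries above are the shutdown signal: deleting the /hbase/running marker znode fires a NodeDeleted event on every watcher, after which the master and region server begin stopping, and each re-arms its watch on the now-absent node (the later "Set watcher on znode that does not yet exist" lines). A minimal sketch with the stock ZooKeeper client; the stop callback and wiring are illustrative, not HBase's ZKWatcher:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Hedged sketch: watch the cluster-running marker znode and treat its
    // deletion as a shutdown request, mirroring the ZKWatcher lines above.
    final class RunningNodeWatcher implements Watcher {
        private static final String RUNNING = "/hbase/running";
        private final ZooKeeper zk;
        private final Runnable onClusterStop; // e.g. begin the server stop sequence

        RunningNodeWatcher(ZooKeeper zk, Runnable onClusterStop) throws Exception {
            this.zk = zk;
            this.onClusterStop = onClusterStop;
            zk.exists(RUNNING, this); // sets a one-shot watch whether or not the node exists
        }

        @Override
        public void process(WatchedEvent event) {
            if (event.getType() == Event.EventType.NodeDeleted && RUNNING.equals(event.getPath())) {
                onClusterStop.run();
            }
            try {
                zk.exists(RUNNING, this); // re-arm: "Set watcher on znode that does not yet exist"
            } catch (Exception ignored) {
                // session is going down during shutdown; nothing further to do
            }
        }
    }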
2024-11-27T03:33:40,811 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:33:40,811 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:40,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:33:40,812 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:33:40,812 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,35649,1732678325623' *****
2024-11-27T03:33:40,812 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:33:40,813 INFO [RS:0;66d3634da959:35649 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:33:40,813 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:33:40,814 INFO [RS:0;66d3634da959:35649 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:33:40,814 INFO [RS:0;66d3634da959:35649 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:33:40,815 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(3091): Received CLOSE for 136d10994744c40c090331422234e748
2024-11-27T03:33:40,816 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,35649,1732678325623
2024-11-27T03:33:40,816 INFO [RS:0;66d3634da959:35649 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:33:40,816 INFO [RS:0;66d3634da959:35649 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:35649.
2024-11-27T03:33:40,816 DEBUG [RS:0;66d3634da959:35649 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:33:40,816 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 136d10994744c40c090331422234e748, disabling compactions & flushes
2024-11-27T03:33:40,816 DEBUG [RS:0;66d3634da959:35649 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:40,816 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.
2024-11-27T03:33:40,817 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.
2024-11-27T03:33:40,817 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. after waiting 0 ms
2024-11-27T03:33:40,817 INFO [RS:0;66d3634da959:35649 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-27T03:33:40,817 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.
2024-11-27T03:33:40,817 INFO [RS:0;66d3634da959:35649 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-27T03:33:40,817 INFO [RS:0;66d3634da959:35649 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-27T03:33:40,817 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 136d10994744c40c090331422234e748 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-11-27T03:33:40,817 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-27T03:33:40,818 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-27T03:33:40,818 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:33:40,818 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1325): Online Regions={136d10994744c40c090331422234e748=TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748., 1588230740=hbase:meta,,1.1588230740}
2024-11-27T03:33:40,818 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:33:40,818 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:33:40,818 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:33:40,818 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:33:40,819 DEBUG [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1351): Waiting on 136d10994744c40c090331422234e748, 1588230740
2024-11-27T03:33:40,819 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB
2024-11-27T03:33:40,824 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/a3b9b7c1c2ab4730af09f638c3dd3a70 is 1080, key is row0029/info:/1732678414712/Put/seqid=0 2024-11-27T03:33:40,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741850_1026 (size=8193) 2024-11-27T03:33:40,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741850_1026 (size=8193) 2024-11-27T03:33:40,831 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/a3b9b7c1c2ab4730af09f638c3dd3a70 2024-11-27T03:33:40,841 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/info/cbeba4f05c9f4deaad985419e01fb914 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748./info:regioninfo/1732678328780/Put/seqid=0 2024-11-27T03:33:40,842 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/.tmp/info/a3b9b7c1c2ab4730af09f638c3dd3a70 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/a3b9b7c1c2ab4730af09f638c3dd3a70 2024-11-27T03:33:40,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741851_1027 (size=7016) 2024-11-27T03:33:40,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741851_1027 (size=7016) 2024-11-27T03:33:40,852 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/info/cbeba4f05c9f4deaad985419e01fb914 2024-11-27T03:33:40,852 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/a3b9b7c1c2ab4730af09f638c3dd3a70, entries=3, sequenceid=48, filesize=8.0 K 2024-11-27T03:33:40,854 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 136d10994744c40c090331422234e748 in 37ms, sequenceid=48, compaction requested=true 2024-11-27T03:33:40,855 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/4ef7f7abd79448aeb4d4dcf5e78e1255, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/80552530274947d39a7693ce29626b54] to archive 2024-11-27T03:33:40,857 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T03:33:40,861 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/6707084687794bd98dca2e0b09219b17 2024-11-27T03:33:40,863 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/4ef7f7abd79448aeb4d4dcf5e78e1255 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/4ef7f7abd79448aeb4d4dcf5e78e1255 2024-11-27T03:33:40,865 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/80552530274947d39a7693ce29626b54 to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/info/80552530274947d39a7693ce29626b54 2024-11-27T03:33:40,875 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:33:40,878 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/ns/2b429eee13ca4c51a1e18cd94a9e6956 is 43, key is default/ns:d/1732678327697/Put/seqid=0 2024-11-27T03:33:40,875 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=66d3634da959:33871 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-27T03:33:40,879 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6707084687794bd98dca2e0b09219b17=12509, 4ef7f7abd79448aeb4d4dcf5e78e1255=12509, 80552530274947d39a7693ce29626b54=12509] 2024-11-27T03:33:40,884 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/default/TestLogRolling-testSlowSyncLogRolling/136d10994744c40c090331422234e748/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-27T03:33:40,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741852_1028 (size=5153) 2024-11-27T03:33:40,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741852_1028 (size=5153) 2024-11-27T03:33:40,885 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/ns/2b429eee13ca4c51a1e18cd94a9e6956 2024-11-27T03:33:40,887 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:33:40,887 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 136d10994744c40c090331422234e748: Waiting for close lock at 1732678420816Running coprocessor pre-close hooks at 1732678420816Disabling compacts and flushes for region at 1732678420816Disabling writes for close at 1732678420817 (+1 ms)Obtaining lock to block concurrent updates at 1732678420817Preparing flush snapshotting stores in 136d10994744c40c090331422234e748 at 1732678420817Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732678420818 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 
at 1732678420819 (+1 ms)Flushing 136d10994744c40c090331422234e748/info: creating writer at 1732678420819Flushing 136d10994744c40c090331422234e748/info: appending metadata at 1732678420823 (+4 ms)Flushing 136d10994744c40c090331422234e748/info: closing flushed file at 1732678420824 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29c04ffa: reopening flushed file at 1732678420841 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 136d10994744c40c090331422234e748 in 37ms, sequenceid=48, compaction requested=true at 1732678420854 (+13 ms)Writing region close event to WAL at 1732678420880 (+26 ms)Running coprocessor post-close hooks at 1732678420885 (+5 ms)Closed at 1732678420887 (+2 ms) 2024-11-27T03:33:40,888 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732678327915.136d10994744c40c090331422234e748. 2024-11-27T03:33:40,910 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/table/7560800c34084389afa65d8587ca9352 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732678328799/Put/seqid=0 2024-11-27T03:33:40,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741853_1029 (size=5396) 2024-11-27T03:33:40,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741853_1029 (size=5396) 2024-11-27T03:33:40,916 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/table/7560800c34084389afa65d8587ca9352 2024-11-27T03:33:40,925 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/info/cbeba4f05c9f4deaad985419e01fb914 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/info/cbeba4f05c9f4deaad985419e01fb914 2024-11-27T03:33:40,934 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/info/cbeba4f05c9f4deaad985419e01fb914, entries=10, sequenceid=11, filesize=6.9 K 2024-11-27T03:33:40,935 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/ns/2b429eee13ca4c51a1e18cd94a9e6956 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/ns/2b429eee13ca4c51a1e18cd94a9e6956 2024-11-27T03:33:40,942 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/ns/2b429eee13ca4c51a1e18cd94a9e6956, entries=2, sequenceid=11, filesize=5.0 K 2024-11-27T03:33:40,944 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/.tmp/table/7560800c34084389afa65d8587ca9352 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/table/7560800c34084389afa65d8587ca9352 2024-11-27T03:33:40,947 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-27T03:33:40,947 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-27T03:33:40,951 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/table/7560800c34084389afa65d8587ca9352, entries=2, sequenceid=11, filesize=5.3 K 2024-11-27T03:33:40,953 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-27T03:33:40,958 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-27T03:33:40,959 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:33:40,959 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:33:40,960 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678420818Running coprocessor pre-close hooks at 1732678420818Disabling compacts and flushes for region at 1732678420818Disabling writes for close at 1732678420818Obtaining lock to block concurrent updates at 1732678420819 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732678420819Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732678420819Flushing stores of hbase:meta,,1.1588230740 at 1732678420820 (+1 ms)Flushing 1588230740/info: creating writer at 1732678420821 (+1 ms)Flushing 1588230740/info: appending metadata at 1732678420841 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732678420841Flushing 1588230740/ns: creating writer at 1732678420862 (+21 ms)Flushing 1588230740/ns: appending metadata at 1732678420877 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732678420878 (+1 ms)Flushing 1588230740/table: creating writer at 1732678420894 (+16 ms)Flushing 1588230740/table: appending metadata at 1732678420909 (+15 ms)Flushing 1588230740/table: closing 
flushed file at 1732678420909Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e1f3d39: reopening flushed file at 1732678420924 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f1f3f43: reopening flushed file at 1732678420934 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@136b69f3: reopening flushed file at 1732678420942 (+8 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1732678420953 (+11 ms)Writing region close event to WAL at 1732678420954 (+1 ms)Running coprocessor post-close hooks at 1732678420959 (+5 ms)Closed at 1732678420959 2024-11-27T03:33:40,960 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T03:33:41,019 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,35649,1732678325623; all regions closed. 2024-11-27T03:33:41,022 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,022 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,022 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,023 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,023 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741834_1010 (size=3066) 2024-11-27T03:33:41,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741834_1010 (size=3066) 2024-11-27T03:33:41,035 DEBUG [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs 2024-11-27T03:33:41,035 INFO [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C35649%2C1732678325623.meta:.meta(num 1732678327524) 2024-11-27T03:33:41,035 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,036 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,036 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,036 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,036 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741848_1024 (size=12695) 2024-11-27T03:33:41,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741848_1024 (size=12695) 2024-11-27T03:33:41,043 DEBUG [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/oldWALs 2024-11-27T03:33:41,043 INFO [RS:0;66d3634da959:35649 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C35649%2C1732678325623:(num 1732678400670) 2024-11-27T03:33:41,043 DEBUG [RS:0;66d3634da959:35649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:33:41,043 INFO [RS:0;66d3634da959:35649 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:33:41,043 INFO [RS:0;66d3634da959:35649 {}] hbase.HBaseServerBase(438): 
Shutdown chores and chore service 2024-11-27T03:33:41,043 INFO [RS:0;66d3634da959:35649 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-27T03:33:41,043 INFO [RS:0;66d3634da959:35649 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:33:41,043 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-27T03:33:41,044 INFO [RS:0;66d3634da959:35649 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35649 2024-11-27T03:33:41,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,35649,1732678325623 2024-11-27T03:33:41,052 INFO [RS:0;66d3634da959:35649 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:33:41,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:33:41,063 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,35649,1732678325623] 2024-11-27T03:33:41,073 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,35649,1732678325623 already deleted, retry=false 2024-11-27T03:33:41,073 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,35649,1732678325623 expired; onlineServers=0 2024-11-27T03:33:41,073 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,33871,1732678324907' ***** 2024-11-27T03:33:41,074 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-27T03:33:41,074 INFO [M:0;66d3634da959:33871 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-27T03:33:41,074 INFO [M:0;66d3634da959:33871 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:33:41,074 DEBUG [M:0;66d3634da959:33871 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-27T03:33:41,074 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-27T03:33:41,074 DEBUG [M:0;66d3634da959:33871 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-27T03:33:41,075 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678326734 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678326734,5,FailOnTimeoutGroup] 2024-11-27T03:33:41,075 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678326738 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678326738,5,FailOnTimeoutGroup] 2024-11-27T03:33:41,075 INFO [M:0;66d3634da959:33871 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-27T03:33:41,075 INFO [M:0;66d3634da959:33871 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:33:41,075 DEBUG [M:0;66d3634da959:33871 {}] master.HMaster(1795): Stopping service threads 2024-11-27T03:33:41,075 INFO [M:0;66d3634da959:33871 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-27T03:33:41,076 INFO [M:0;66d3634da959:33871 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-27T03:33:41,076 INFO [M:0;66d3634da959:33871 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-27T03:33:41,077 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-27T03:33:41,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-27T03:33:41,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:33:41,084 DEBUG [M:0;66d3634da959:33871 {}] zookeeper.ZKUtil(347): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-27T03:33:41,084 WARN [M:0;66d3634da959:33871 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-27T03:33:41,086 INFO [M:0;66d3634da959:33871 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/.lastflushedseqids 2024-11-27T03:33:41,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741854_1030 (size=130) 2024-11-27T03:33:41,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741854_1030 (size=130) 2024-11-27T03:33:41,102 INFO [M:0;66d3634da959:33871 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-27T03:33:41,103 INFO [M:0;66d3634da959:33871 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-27T03:33:41,103 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T03:33:41,103 INFO [M:0;66d3634da959:33871 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:41,103 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:41,103 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T03:33:41,103 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:41,103 INFO [M:0;66d3634da959:33871 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-27T03:33:41,126 DEBUG [M:0;66d3634da959:33871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7fff3bfa97d418689e3758570ef8c95 is 82, key is hbase:meta,,1/info:regioninfo/1732678327597/Put/seqid=0 2024-11-27T03:33:41,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741855_1031 (size=5672) 2024-11-27T03:33:41,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741855_1031 (size=5672) 2024-11-27T03:33:41,133 INFO [M:0;66d3634da959:33871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7fff3bfa97d418689e3758570ef8c95 2024-11-27T03:33:41,155 DEBUG [M:0;66d3634da959:33871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c8e5722de03241c7a2611c8714a8b57c is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732678328809/Put/seqid=0 2024-11-27T03:33:41,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741856_1032 (size=6247) 2024-11-27T03:33:41,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741856_1032 (size=6247) 2024-11-27T03:33:41,161 INFO [M:0;66d3634da959:33871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c8e5722de03241c7a2611c8714a8b57c 2024-11-27T03:33:41,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:41,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35649-0x1017a590b310001, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
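[Editor's note] The `ZKWatcher(609)` lines throughout this shutdown record ZooKeeper events (NodeDeleted, NodeChildrenChanged, and finally `state=Closed`) being delivered to registered watchers. For orientation only, a minimal stand-alone watcher in the plain ZooKeeper client API (not HBase's `ZKWatcher`), using the quorum address seen in the log:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatcherSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log above; session timeout is arbitrary.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60515", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Events such as NodeDeleted on /hbase/running, or a Closed session
        // state with a null path, arrive here much as the ZKWatcher lines show.
        System.out.println("type=" + event.getType()
            + " state=" + event.getState() + " path=" + event.getPath());
      }
    });
    zk.close();
  }
}
```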
2024-11-27T03:33:41,164 INFO [RS:0;66d3634da959:35649 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:33:41,164 INFO [RS:0;66d3634da959:35649 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,35649,1732678325623; zookeeper connection closed. 2024-11-27T03:33:41,164 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@297b461e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@297b461e 2024-11-27T03:33:41,165 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-27T03:33:41,168 INFO [M:0;66d3634da959:33871 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c8e5722de03241c7a2611c8714a8b57c 2024-11-27T03:33:41,189 DEBUG [M:0;66d3634da959:33871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dc4f176c0a0b42d3ba877ee4bf567607 is 69, key is 66d3634da959,35649,1732678325623/rs:state/1732678326770/Put/seqid=0 2024-11-27T03:33:41,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741857_1033 (size=5156) 2024-11-27T03:33:41,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741857_1033 (size=5156) 2024-11-27T03:33:41,195 INFO [M:0;66d3634da959:33871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dc4f176c0a0b42d3ba877ee4bf567607 2024-11-27T03:33:41,217 DEBUG [M:0;66d3634da959:33871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/68c6c4cee4d14160a48ee4f5b8379e34 is 52, key is load_balancer_on/state:d/1732678327895/Put/seqid=0 2024-11-27T03:33:41,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741858_1034 (size=5056) 2024-11-27T03:33:41,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741858_1034 (size=5056) 2024-11-27T03:33:41,224 INFO [M:0;66d3634da959:33871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/68c6c4cee4d14160a48ee4f5b8379e34 2024-11-27T03:33:41,233 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e7fff3bfa97d418689e3758570ef8c95 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7fff3bfa97d418689e3758570ef8c95 2024-11-27T03:33:41,240 INFO [M:0;66d3634da959:33871 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e7fff3bfa97d418689e3758570ef8c95, entries=8, sequenceid=59, filesize=5.5 K 2024-11-27T03:33:41,242 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c8e5722de03241c7a2611c8714a8b57c as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c8e5722de03241c7a2611c8714a8b57c 2024-11-27T03:33:41,250 INFO [M:0;66d3634da959:33871 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c8e5722de03241c7a2611c8714a8b57c 2024-11-27T03:33:41,250 INFO [M:0;66d3634da959:33871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c8e5722de03241c7a2611c8714a8b57c, entries=6, sequenceid=59, filesize=6.1 K 2024-11-27T03:33:41,252 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dc4f176c0a0b42d3ba877ee4bf567607 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dc4f176c0a0b42d3ba877ee4bf567607 2024-11-27T03:33:41,259 INFO [M:0;66d3634da959:33871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dc4f176c0a0b42d3ba877ee4bf567607, entries=1, sequenceid=59, filesize=5.0 K 2024-11-27T03:33:41,261 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/68c6c4cee4d14160a48ee4f5b8379e34 as hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/68c6c4cee4d14160a48ee4f5b8379e34 2024-11-27T03:33:41,270 INFO [M:0;66d3634da959:33871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/68c6c4cee4d14160a48ee4f5b8379e34, entries=1, sequenceid=59, filesize=4.9 K 2024-11-27T03:33:41,272 INFO [M:0;66d3634da959:33871 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=59, compaction requested=false 2024-11-27T03:33:41,273 INFO [M:0;66d3634da959:33871 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
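[Editor's note] The repeated `HRegionFileSystem(442): Committing ... .tmp/... as ...` lines above reflect the two-phase flush visible throughout this log: each store file is first written completely under the region's `.tmp` directory, then moved into the column-family directory. A minimal sketch of that write-then-rename pattern on a Hadoop FileSystem, with hypothetical paths (this is an illustration of the pattern, not HBase's internal implementation):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushCommitSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());

    // Hypothetical file names; the log above commits files from
    // .../1595e783.../.tmp/info/ into .../1595e783.../info/.
    Path tmp = new Path("/data/master/store/region/.tmp/info/hfile-0001");
    Path dst = new Path("/data/master/store/region/info/hfile-0001");

    // The file is fully written under .tmp first, then moved into the
    // family directory, so readers never observe a partially written file.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed: " + tmp + " -> " + dst);
    }
  }
}
```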
2024-11-27T03:33:41,273 DEBUG [M:0;66d3634da959:33871 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678421103Disabling compacts and flushes for region at 1732678421103Disabling writes for close at 1732678421103Obtaining lock to block concurrent updates at 1732678421103Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678421103Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732678421104 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678421105 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678421105Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678421125 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678421125Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678421138 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678421154 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678421154Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678421168 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678421188 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678421188Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678421202 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678421216 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678421217 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3659f5fa: reopening flushed file at 1732678421231 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@533fb98: reopening flushed file at 1732678421240 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7350a356: reopening flushed file at 1732678421250 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36e7c983: reopening flushed file at 1732678421259 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=59, compaction requested=false at 1732678421272 (+13 ms)Writing region close event to WAL at 1732678421273 (+1 ms)Closed at 1732678421273 2024-11-27T03:33:41,274 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,274 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,275 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36337 is added to blk_1073741830_1006 (size=27973) 2024-11-27T03:33:41,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39445 is added to blk_1073741830_1006 (size=27973) 2024-11-27T03:33:41,278 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-27T03:33:41,278 INFO [M:0;66d3634da959:33871 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-27T03:33:41,278 INFO [M:0;66d3634da959:33871 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33871 2024-11-27T03:33:41,279 INFO [M:0;66d3634da959:33871 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:33:41,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:41,392 INFO [M:0;66d3634da959:33871 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:33:41,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33871-0x1017a590b310000, quorum=127.0.0.1:60515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:41,428 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:33:41,430 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:33:41,431 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:33:41,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:33:41,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir/,STOPPED} 2024-11-27T03:33:41,434 WARN [BP-1803410911-172.17.0.3-1732678320802 heartbeating to localhost/127.0.0.1:45395 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:33:41,434 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:33:41,434 WARN [BP-1803410911-172.17.0.3-1732678320802 heartbeating to localhost/127.0.0.1:45395 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1803410911-172.17.0.3-1732678320802 (Datanode Uuid d88a7120-d1af-4a1f-9828-3e5c578e8aae) service to localhost/127.0.0.1:45395 2024-11-27T03:33:41,434 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:33:41,435 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data3/current/BP-1803410911-172.17.0.3-1732678320802 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:33:41,435 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data4/current/BP-1803410911-172.17.0.3-1732678320802 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:33:41,436 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:33:41,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:33:41,440 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:33:41,441 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:33:41,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:33:41,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir/,STOPPED} 2024-11-27T03:33:41,443 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
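[Editor's note] Once the minicluster teardown completes below, the test's `ResourceChecker` reports live-thread counts before and after the test ("Thread=77 (was 12)") and dumps stacks of potentially hanging threads. A minimal sketch of that kind of leak check using only the standard library (not HBase's `ResourceChecker`):

```java
import java.util.Map;

public class ThreadCheckSketch {
  public static void main(String[] args) {
    // Snapshot all live threads with their stacks: the raw data behind a
    // "Thread=77 (was 12)" style report and the stack dumps that follow.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("live threads: " + stacks.size());
    stacks.forEach((t, frames) -> {
      System.out.println("Potentially hanging thread: " + t.getName());
      for (StackTraceElement f : frames) {
        System.out.println("    " + f);
      }
    });
  }
}
```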
2024-11-27T03:33:41,443 WARN [BP-1803410911-172.17.0.3-1732678320802 heartbeating to localhost/127.0.0.1:45395 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:33:41,443 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:33:41,443 WARN [BP-1803410911-172.17.0.3-1732678320802 heartbeating to localhost/127.0.0.1:45395 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1803410911-172.17.0.3-1732678320802 (Datanode Uuid 8da65dfb-4ab0-4cb2-a6d4-57e861d2aa13) service to localhost/127.0.0.1:45395
2024-11-27T03:33:41,444 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data1/current/BP-1803410911-172.17.0.3-1732678320802 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:33:41,444 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/cluster_5ae3eb20-aff9-4f2a-ba7b-f15526c0fea8/data/data2/current/BP-1803410911-172.17.0.3-1732678320802 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:33:41,444 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:33:41,453 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T03:33:41,453 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:33:41,453 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:33:41,454 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:33:41,454 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir/,STOPPED}
2024-11-27T03:33:41,463 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-27T03:33:41,494 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-27T03:33:41,502 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45395
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RpcClient-timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: HMaster-EventLoopGroup-1-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:45395
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-3-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Monitor thread for TaskMonitor
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45395
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SSL Certificates Store Monitor
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45395 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Potentially hanging thread: master/66d3634da959:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: master/66d3634da959:0:becomeActiveMaster-MemStoreChunkPool Statistics
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45395 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-5-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-3-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: HMaster-EventLoopGroup-1-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: SnapshotHandlerChoreCleaner
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-1-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-4-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SessionTracker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-2-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45395
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-3-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: regionserver/66d3634da959:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: nioEventLoopGroup-2-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: weak-ref-cleaner-strictcontextstorage
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: nioEventLoopGroup-2-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45395
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: ForkJoinPool-2-worker-4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@4d9953b6
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Async-Client-Retry-Timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45395 from jenkins.hfs.0
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
- Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=93 (was 292), ProcessCount=11 (was 11), AvailableMemoryMB=8437 (was 9008)
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=93, ProcessCount=11, AvailableMemoryMB=8437
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.log.dir so I do NOT create it in target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a553479c-9052-66f3-91d5-9be9e78808bf/hadoop.tmp.dir so I do NOT create it in target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b, deleteOnExit=true
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-27T03:33:41,508 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/test.cache.data in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.tmp.dir in system properties and HBase conf
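The ResourceChecker "after:"/"before:" records bracket each test with a resource snapshot and print any surviving threads with their stacks. The gist of that check, reduced to thread counting with plain JDK APIs (illustrative only; the real HBase ResourceChecker also tracks open file descriptors, load average, process count, and available memory):

    // ThreadLeakCheckSketch.java -- before/after thread snapshot (illustrative)
    import java.util.Map;
    import java.util.Set;

    public class ThreadLeakCheckSketch {
        public static void main(String[] args) throws Exception {
            int before = Thread.activeCount();
            Set<Thread> baseline = Thread.getAllStackTraces().keySet();

            runTestBody(); // stand-in for the actual test

            System.out.printf("Thread=%d (was %d)%n", Thread.activeCount(), before);
            // Report survivors that were not present before the test, with
            // their stacks, mirroring the "Potentially hanging thread:" entries.
            for (Map.Entry<Thread, StackTraceElement[]> e
                    : Thread.getAllStackTraces().entrySet()) {
                if (!baseline.contains(e.getKey()) && e.getKey().isAlive()) {
                    System.out.println("Potentially hanging thread: " + e.getKey().getName());
                    for (StackTraceElement frame : e.getValue()) {
                        System.out.println("    " + frame);
                    }
                }
            }
        }

        private static void runTestBody() {
            // Deliberately leak one daemon thread so the report prints something.
            Thread t = new Thread(() -> {
                try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
            }, "leaky-example");
            t.setDaemon(true);
            t.start();
        }
    }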
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-27T03:33:41,509 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-27T03:33:41,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
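Each "Setting X to ... in system properties and HBase conf" record redirects one Hadoop/YARN scratch location into the per-test data directory; the mechanism is just System.setProperty per key. A sketch under that assumption (key list abbreviated, paths and class name illustrative), with the rest of the keys following below:

    // TestDirProps.java -- point scratch dirs at a per-test base (illustrative)
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public final class TestDirProps {
        public static void redirect(Path base) {
            String[] keys = {
                "test.cache.data", "hadoop.tmp.dir", "hadoop.log.dir",
                "mapreduce.cluster.local.dir", "mapreduce.cluster.temp.dir",
                "dfs.journalnode.edits.dir", "nfs.dump.dir", "java.io.tmpdir",
            };
            for (String key : keys) {
                // One subdirectory per property, named after the key itself,
                // matching the path shapes in the records above.
                System.setProperty(key, base.resolve(key).toString());
            }
        }

        public static void main(String[] args) {
            redirect(Paths.get("/tmp/test-data/example-run"));
            System.out.println(System.getProperty("hadoop.tmp.dir"));
        }
    }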
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/nfs.dump.dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/java.io.tmpdir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-27T03:33:41,510 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-27T03:33:41,524 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-27T03:33:41,884 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:33:41,890 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:33:41,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:33:41,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:33:41,891 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:33:41,892 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
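All of the startup records from "STARTING DFS" onward are driven by a single startMiniCluster call. A minimal test scaffold, assuming the HBaseTestingUtil and StartMiniClusterOption API that these records name (a sketch, not the actual TestLogRolling code):

    // MiniClusterLifecycleSketch.java -- start/stop a minicluster (illustrative)
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterLifecycleSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1,
            // numDataNodes=2, numZkServers=1} from the startup record above.
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            util.startMiniCluster(option);
            try {
                // test body goes here
            } finally {
                util.shutdownMiniCluster(); // emits the "Minicluster is down" record
            }
        }
    }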
2024-11-27T03:33:41,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:33:41,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:33:41,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/java.io.tmpdir/jetty-localhost-41961-hadoop-hdfs-3_4_1-tests_jar-_-any-2068697064075021191/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T03:33:41,991 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:41961}
2024-11-27T03:33:41,991 INFO [Time-limited test {}] server.Server(415): Started @103035ms
2024-11-27T03:33:42,003 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-27T03:33:42,280 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:33:42,284 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:33:42,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:33:42,285 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:33:42,285 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:33:42,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:33:42,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:33:42,381 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4595827f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/java.io.tmpdir/jetty-localhost-39865-hadoop-hdfs-3_4_1-tests_jar-_-any-14703729656570300409/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:33:42,382 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:39865}
2024-11-27T03:33:42,382 INFO [Time-limited test {}] server.Server(415): Started @103426ms
2024-11-27T03:33:42,384 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:33:42,420 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:33:42,424 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:33:42,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:33:42,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:33:42,425 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:33:42,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:33:42,426 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:33:42,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da5059a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/java.io.tmpdir/jetty-localhost-46209-hadoop-hdfs-3_4_1-tests_jar-_-any-7562946337409335203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:33:42,520 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:46209}
2024-11-27T03:33:42,520 INFO [Time-limited test {}] server.Server(415): Started @103564ms
2024-11-27T03:33:42,522 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:33:43,693 WARN [Thread-455 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data3/current/BP-97074840-172.17.0.3-1732678421535/current, will proceed with Du for space computation calculation,
2024-11-27T03:33:43,693 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data4/current/BP-97074840-172.17.0.3-1732678421535/current, will proceed with Du for space computation calculation,
2024-11-27T03:33:43,694 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data1/current/BP-97074840-172.17.0.3-1732678421535/current, will proceed with Du for space computation calculation,
2024-11-27T03:33:43,694 WARN [Thread-458 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data2/current/BP-97074840-172.17.0.3-1732678421535/current, will proceed with Du for space computation calculation,
2024-11-27T03:33:43,716 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:33:43,716 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:33:43,718 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82f6b3d173a73f14 with lease ID 0xadc4ba1d65d1ac8b: Processing first storage report for DS-1d1f1780-c05b-4b29-bce9-ba6e26fa5cc5 from datanode DatanodeRegistration(127.0.0.1:44499, datanodeUuid=97022696-7be4-4934-9bf4-c1a73e6a2ce1, infoPort=35401, infoSecurePort=0, ipcPort=37491, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535)
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82f6b3d173a73f14 with lease ID 0xadc4ba1d65d1ac8b: from storage DS-1d1f1780-c05b-4b29-bce9-ba6e26fa5cc5 node DatanodeRegistration(127.0.0.1:44499, datanodeUuid=97022696-7be4-4934-9bf4-c1a73e6a2ce1, infoPort=35401, infoSecurePort=0, ipcPort=37491, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c8dc6924049325b with lease ID 0xadc4ba1d65d1ac8c: Processing first storage report for DS-893de0a3-6cf3-4a36-b371-136f5aa8f4d2 from datanode DatanodeRegistration(127.0.0.1:41361, datanodeUuid=b5332970-86ac-451a-9f97-a3aab13b1027, infoPort=35047, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535)
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c8dc6924049325b with lease ID 0xadc4ba1d65d1ac8c: from storage DS-893de0a3-6cf3-4a36-b371-136f5aa8f4d2 node DatanodeRegistration(127.0.0.1:41361, datanodeUuid=b5332970-86ac-451a-9f97-a3aab13b1027, infoPort=35047, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x82f6b3d173a73f14 with lease ID 0xadc4ba1d65d1ac8b: Processing first storage report for DS-6919cf41-bd62-4cfc-bf8b-4a5dbb2039f5 from datanode DatanodeRegistration(127.0.0.1:44499, datanodeUuid=97022696-7be4-4934-9bf4-c1a73e6a2ce1, infoPort=35401, infoSecurePort=0, ipcPort=37491, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535)
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x82f6b3d173a73f14 with lease ID 0xadc4ba1d65d1ac8b: from storage DS-6919cf41-bd62-4cfc-bf8b-4a5dbb2039f5 node DatanodeRegistration(127.0.0.1:44499, datanodeUuid=97022696-7be4-4934-9bf4-c1a73e6a2ce1, infoPort=35401, infoSecurePort=0, ipcPort=37491, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6c8dc6924049325b with lease ID 0xadc4ba1d65d1ac8c: Processing first storage report for DS-9edbd580-d929-451e-8e4b-8640b4cc66a6 from datanode DatanodeRegistration(127.0.0.1:41361, datanodeUuid=b5332970-86ac-451a-9f97-a3aab13b1027, infoPort=35047, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535)
2024-11-27T03:33:43,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6c8dc6924049325b with lease ID 0xadc4ba1d65d1ac8c: from storage DS-9edbd580-d929-451e-8e4b-8640b4cc66a6 node DatanodeRegistration(127.0.0.1:41361, datanodeUuid=b5332970-86ac-451a-9f97-a3aab13b1027, infoPort=35047, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1798124821;c=1732678421535), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:33:43,771 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773
2024-11-27T03:33:43,774 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/zookeeper_0, clientPort=56485, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-27T03:33:43,775 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56485
2024-11-27T03:33:43,776 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:43,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:43,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:33:43,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:33:43,788 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861 with version=8
2024-11-27T03:33:43,788 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging
2024-11-27T03:33:43,791 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-27T03:33:43,791 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:33:43,792 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33925
2024-11-27T03:33:43,794 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33925 connecting to ZooKeeper ensemble=127.0.0.1:56485
2024-11-27T03:33:43,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:339250x0, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:33:43,866 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33925-0x1017a5a90890000 connected
2024-11-27T03:33:43,969 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:43,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:43,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:33:43,981 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861, hbase.cluster.distributed=false
2024-11-27T03:33:43,983 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:33:43,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33925
2024-11-27T03:33:43,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33925
2024-11-27T03:33:43,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33925
2024-11-27T03:33:43,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33925
2024-11-27T03:33:43,985 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33925
2024-11-27T03:33:44,003 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:33:44,003 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:44,004 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:44,004 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:33:44,004 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:44,004 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:33:44,004 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-27T03:33:44,004 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:33:44,005 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34539
2024-11-27T03:33:44,006 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34539 connecting to ZooKeeper ensemble=127.0.0.1:56485
2024-11-27T03:33:44,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:44,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:44,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345390x0, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:33:44,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:33:44,021 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34539-0x1017a5a90890001 connected
2024-11-27T03:33:44,021 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-27T03:33:44,022 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-27T03:33:44,023 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-27T03:33:44,025 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:33:44,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34539
2024-11-27T03:33:44,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34539
2024-11-27T03:33:44,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34539
2024-11-27T03:33:44,028 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34539
2024-11-27T03:33:44,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34539
2024-11-27T03:33:44,043 DEBUG [M:0;66d3634da959:33925 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:33925
2024-11-27T03:33:44,044 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,33925,1732678423790
2024-11-27T03:33:44,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:33:44,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:33:44,053 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,33925,1732678423790
2024-11-27T03:33:44,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-27T03:33:44,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,063 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-27T03:33:44,063 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,33925,1732678423790 from backup master directory
2024-11-27T03:33:44,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,33925,1732678423790
2024-11-27T03:33:44,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:33:44,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:33:44,073 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:33:44,073 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,33925,1732678423790
2024-11-27T03:33:44,078 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/hbase.id] with ID: 1956c0a6-5b0f-40a1-be2a-55cfa086ce24
2024-11-27T03:33:44,078 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/.tmp/hbase.id
2024-11-27T03:33:44,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:33:44,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:33:44,085 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/.tmp/hbase.id]:[hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/hbase.id]
2024-11-27T03:33:44,099 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:44,099 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-27T03:33:44,101 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
2024-11-27T03:33:44,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:33:44,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:33:44,121 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:33:44,122 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-27T03:33:44,123 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:33:44,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:33:44,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:33:44,133 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store
2024-11-27T03:33:44,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:33:44,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:33:44,143 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:44,143 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:33:44,144 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:33:44,144 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:33:44,144 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:33:44,144 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:33:44,144 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:33:44,144 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678424143Disabling compacts and flushes for region at 1732678424143Disabling writes for close at 1732678424144 (+1 ms)Writing region close event to WAL at 1732678424144Closed at 1732678424144
2024-11-27T03:33:44,145 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/.initializing
2024-11-27T03:33:44,145 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/WALs/66d3634da959,33925,1732678423790
2024-11-27T03:33:44,149 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C33925%2C1732678423790, suffix=, logDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/WALs/66d3634da959,33925,1732678423790, archiveDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/oldWALs, maxLogs=10
2024-11-27T03:33:44,150 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C33925%2C1732678423790.1732678424149
2024-11-27T03:33:44,155 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/WALs/66d3634da959,33925,1732678423790/66d3634da959%2C33925%2C1732678423790.1732678424149
2024-11-27T03:33:44,159 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35401:35401),(127.0.0.1/127.0.0.1:35047:35047)]
2024-11-27T03:33:44,160 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:33:44,160 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:44,161 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,161 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,163 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,164 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-27T03:33:44,165 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,165 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:44,166 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-27T03:33:44,168 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:33:44,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:33:44,171 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:33:44,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:33:44,174 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:33:44,175 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,176 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,176 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,178 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,178 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,178 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:33:44,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:44,183 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:33:44,183 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737418, jitterRate=-0.062325626611709595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:33:44,184 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678424161Initializing all the Stores at 1732678424162 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678424162Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678424162Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678424163 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678424163Cleaning up temporary data from old regions at 1732678424178 (+15 ms)Region opened successfully at 1732678424184 (+6 ms)
2024-11-27T03:33:44,185 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-27T03:33:44,189 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ac207f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:33:44,190 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:33:44,191 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:33:44,191 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:33:44,191 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:33:44,192 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-27T03:33:44,192 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-27T03:33:44,192 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:33:44,195 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:33:44,197 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:33:44,210 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:33:44,210 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:33:44,211 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:33:44,220 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:33:44,221 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:33:44,222 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:33:44,231 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:33:44,232 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:33:44,241 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:33:44,243 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:33:44,252 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:33:44,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:44,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:44,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,263 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,33925,1732678423790, sessionid=0x1017a5a90890000, setting cluster-up flag (Was=false)
2024-11-27T03:33:44,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,315 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:33:44,320 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,33925,1732678423790
2024-11-27T03:33:44,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,379 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:33:44,384 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,33925,1732678423790
2024-11-27T03:33:44,388 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-27T03:33:44,391 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:44,392 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-27T03:33:44,392 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-27T03:33:44,392 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,33925,1732678423790 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-27T03:33:44,394 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:44,394 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:44,394 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:44,394 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:44,394 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10
2024-11-27T03:33:44,395 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,395 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:33:44,395 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,396 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678454396
2024-11-27T03:33:44,396 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-27T03:33:44,396 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-27T03:33:44,396 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-27T03:33:44,396 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-27T03:33:44,397 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-27T03:33:44,397 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-27T03:33:44,397 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,397 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:44,397 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-27T03:33:44,397 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-27T03:33:44,397 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-27T03:33:44,397 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-27T03:33:44,398 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-27T03:33:44,398 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-27T03:33:44,398 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678424398,5,FailOnTimeoutGroup]
2024-11-27T03:33:44,398 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678424398,5,FailOnTimeoutGroup]
2024-11-27T03:33:44,398 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,398 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,398 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-27T03:33:44,399 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,399 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,398 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-27T03:33:44,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:33:44,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:33:44,407 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-27T03:33:44,408 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861
2024-11-27T03:33:44,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:33:44,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:33:44,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:44,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:33:44,421 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:33:44,421 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:44,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:33:44,424 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:33:44,424 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:44,424 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:33:44,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:33:44,426 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:44,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:33:44,428 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:33:44,428 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:44,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:44,429 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:33:44,429 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740
2024-11-27T03:33:44,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740
2024-11-27T03:33:44,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:33:44,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:33:44,432 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(746): ClusterId : 1956c0a6-5b0f-40a1-be2a-55cfa086ce24
2024-11-27T03:33:44,432 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-27T03:33:44,432 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:33:44,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:33:44,437 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:33:44,438 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805429, jitterRate=0.024156108498573303}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:33:44,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678424418Initializing all the Stores at 1732678424419 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678424419Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678424419Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678424419Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678424419Cleaning up temporary data from old regions at 1732678424431 (+12 ms)Region opened successfully at 1732678424439 (+8 ms)
2024-11-27T03:33:44,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:33:44,440 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:33:44,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:33:44,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:33:44,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:33:44,440 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:33:44,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678424439Disabling compacts and flushes for region at 1732678424439Disabling writes for close at 1732678424440 (+1 ms)Writing region close event to WAL at 1732678424440Closed at 1732678424440
2024-11-27T03:33:44,442 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-27T03:33:44,442 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:44,442 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-27T03:33:44,442 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-27T03:33:44,443 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-27T03:33:44,445 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:33:44,446 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-27T03:33:44,453 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-27T03:33:44,453 DEBUG [RS:0;66d3634da959:34539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f60118f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:33:44,466 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:34539
2024-11-27T03:33:44,466 INFO [RS:0;66d3634da959:34539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-27T03:33:44,466 INFO [RS:0;66d3634da959:34539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-27T03:33:44,467 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-27T03:33:44,468 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,33925,1732678423790 with port=34539, startcode=1732678424003
2024-11-27T03:33:44,468 DEBUG [RS:0;66d3634da959:34539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-27T03:33:44,471 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36957, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-27T03:33:44,471 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33925 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,34539,1732678424003
2024-11-27T03:33:44,471 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33925 {}] master.ServerManager(517): Registering regionserver=66d3634da959,34539,1732678424003
2024-11-27T03:33:44,474 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861
2024-11-27T03:33:44,474 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39527
2024-11-27T03:33:44,474 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-27T03:33:44,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:33:44,484 DEBUG [RS:0;66d3634da959:34539 {}] zookeeper.ZKUtil(111): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,34539,1732678424003
2024-11-27T03:33:44,484 WARN [RS:0;66d3634da959:34539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:33:44,484 INFO [RS:0;66d3634da959:34539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:33:44,484 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/WALs/66d3634da959,34539,1732678424003
2024-11-27T03:33:44,485 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,34539,1732678424003]
2024-11-27T03:33:44,489 INFO [RS:0;66d3634da959:34539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-27T03:33:44,491 INFO [RS:0;66d3634da959:34539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-27T03:33:44,492 INFO [RS:0;66d3634da959:34539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:33:44,492 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,492 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-27T03:33:44,493 INFO [RS:0;66d3634da959:34539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-27T03:33:44,493 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,493 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,493 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,493 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,493 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,493 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:33:44,494 DEBUG [RS:0;66d3634da959:34539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:33:44,495 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,495 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,495 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,495 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,495 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,495 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,34539,1732678424003-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:33:44,510 INFO [RS:0;66d3634da959:34539 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-27T03:33:44,510 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,34539,1732678424003-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,510 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,510 INFO [RS:0;66d3634da959:34539 {}] regionserver.Replication(171): 66d3634da959,34539,1732678424003 started
2024-11-27T03:33:44,525 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:44,525 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,34539,1732678424003, RpcServer on 66d3634da959/172.17.0.3:34539, sessionid=0x1017a5a90890001
2024-11-27T03:33:44,525 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-27T03:33:44,525 DEBUG [RS:0;66d3634da959:34539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,34539,1732678424003
2024-11-27T03:33:44,526 DEBUG [RS:0;66d3634da959:34539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,34539,1732678424003'
2024-11-27T03:33:44,526 DEBUG [RS:0;66d3634da959:34539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-27T03:33:44,526 DEBUG [RS:0;66d3634da959:34539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-27T03:33:44,527 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-27T03:33:44,527 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-27T03:33:44,527 DEBUG [RS:0;66d3634da959:34539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,34539,1732678424003
2024-11-27T03:33:44,527 DEBUG [RS:0;66d3634da959:34539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,34539,1732678424003'
2024-11-27T03:33:44,527 DEBUG [RS:0;66d3634da959:34539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-27T03:33:44,528 DEBUG [RS:0;66d3634da959:34539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-27T03:33:44,528 DEBUG [RS:0;66d3634da959:34539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-27T03:33:44,528 INFO [RS:0;66d3634da959:34539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-27T03:33:44,528 INFO [RS:0;66d3634da959:34539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-27T03:33:44,596 WARN [66d3634da959:33925 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-27T03:33:44,631 INFO [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C34539%2C1732678424003, suffix=, logDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/WALs/66d3634da959,34539,1732678424003, archiveDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/oldWALs, maxLogs=32
2024-11-27T03:33:44,633 INFO [RS:0;66d3634da959:34539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34539%2C1732678424003.1732678424633
2024-11-27T03:33:44,640 INFO [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/WALs/66d3634da959,34539,1732678424003/66d3634da959%2C34539%2C1732678424003.1732678424633
2024-11-27T03:33:44,641 DEBUG [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35047:35047),(127.0.0.1/127.0.0.1:35401:35401)]
2024-11-27T03:33:44,847 DEBUG [66d3634da959:33925 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-27T03:33:44,849 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,34539,1732678424003
2024-11-27T03:33:44,853 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,34539,1732678424003, state=OPENING
2024-11-27T03:33:44,905 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-27T03:33:44,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:44,917 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:33:44,917 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:44,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,34539,1732678424003}]
2024-11-27T03:33:44,917 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:45,074 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-27T03:33:45,081 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34005, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-27T03:33:45,087 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-27T03:33:45,088 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:33:45,090 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C34539%2C1732678424003.meta, suffix=.meta, logDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/WALs/66d3634da959,34539,1732678424003, archiveDir=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/oldWALs, maxLogs=32
2024-11-27T03:33:45,093 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34539%2C1732678424003.meta.1732678425093.meta
2024-11-27T03:33:45,099 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/WALs/66d3634da959,34539,1732678424003/66d3634da959%2C34539%2C1732678424003.meta.1732678425093.meta
2024-11-27T03:33:45,100 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35401:35401),(127.0.0.1/127.0.0.1:35047:35047)]
2024-11-27T03:33:45,101 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:33:45,101 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-27T03:33:45,101 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-27T03:33:45,101 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-27T03:33:45,101 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-27T03:33:45,102 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:45,102 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-27T03:33:45,102 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-27T03:33:45,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:33:45,104 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:33:45,104 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:45,105 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:45,105 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:33:45,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:33:45,106 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:45,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:45,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:33:45,107 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:33:45,107 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:45,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:45,108 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:33:45,109 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:33:45,109 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:45,109 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:45,110 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:33:45,111 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740
2024-11-27T03:33:45,112 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740
2024-11-27T03:33:45,114 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:33:45,114 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:33:45,114 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:33:45,116 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:33:45,117 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835410, jitterRate=0.062279656529426575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:33:45,118 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-27T03:33:45,119 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732678425102Writing region info on filesystem at 1732678425102Initializing all the Stores at 1732678425103 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678425103Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678425103Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678425103Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678425103Cleaning up temporary data from old regions at 1732678425114 (+11 ms)Running coprocessor post-open hooks at 1732678425118 (+4 ms)Region opened successfully at 1732678425118
2024-11-27T03:33:45,120 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678425074
2024-11-27T03:33:45,123 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-27T03:33:45,123 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-27T03:33:45,124 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,34539,1732678424003
2024-11-27T03:33:45,125 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,34539,1732678424003, state=OPEN
2024-11-27T03:33:45,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:33:45,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:33:45,165 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,34539,1732678424003
2024-11-27T03:33:45,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:45,165 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:45,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-27T03:33:45,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,34539,1732678424003 in 248 msec
2024-11-27T03:33:45,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-27T03:33:45,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 727 msec
2024-11-27T03:33:45,175 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:45,175 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-27T03:33:45,177 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:33:45,177 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,34539,1732678424003, seqNum=-1]
2024-11-27T03:33:45,178 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:33:45,180 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51145, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:33:45,190 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 798 msec
2024-11-27T03:33:45,190 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678425190, completionTime=-1
2024-11-27T03:33:45,190 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-27T03:33:45,190 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-11-27T03:33:45,192 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678485192
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678545193
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33925,1732678423790-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33925,1732678423790-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33925,1732678423790-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:33925, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:45,193 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-27T03:33:45,194 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-27T03:33:45,195 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-27T03:33:45,198 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.125sec 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33925,1732678423790-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T03:33:45,199 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33925,1732678423790-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-27T03:33:45,202 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-27T03:33:45,202 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-27T03:33:45,202 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,33925,1732678423790-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-27T03:33:45,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-27T03:33:45,232 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9379ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:33:45,232 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,33925,-1 for getting cluster id 2024-11-27T03:33:45,232 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-27T03:33:45,234 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1956c0a6-5b0f-40a1-be2a-55cfa086ce24' 2024-11-27T03:33:45,235 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-27T03:33:45,235 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1956c0a6-5b0f-40a1-be2a-55cfa086ce24" 2024-11-27T03:33:45,235 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e2defca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:33:45,236 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,33925,-1] 2024-11-27T03:33:45,236 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-27T03:33:45,236 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:33:45,238 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-27T03:33:45,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:33:45,241 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-27T03:33:45,242 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,34539,1732678424003, seqNum=-1] 2024-11-27T03:33:45,243 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T03:33:45,246 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44306, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T03:33:45,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster 
is up; activeMaster=66d3634da959,33925,1732678423790
2024-11-27T03:33:45,249 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:45,252 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-27T03:33:45,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-27T03:33:45,252 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:33:45,252 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:33:45,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:45,253 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:45,253 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-27T03:33:45,253 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-27T03:33:45,253 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=599502189, stopped=false
2024-11-27T03:33:45,253 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,33925,1732678423790
2024-11-27T03:33:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:45,273 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:45,273 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:33:45,273 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:33:45,274 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:33:45,274 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:33:45,274 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:33:45,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:45,274 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,34539,1732678424003' *****
2024-11-27T03:33:45,274 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:33:45,274 INFO [RS:0;66d3634da959:34539 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:33:45,275 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,34539,1732678424003
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:34539.
2024-11-27T03:33:45,275 DEBUG [RS:0;66d3634da959:34539 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:33:45,275 DEBUG [RS:0;66d3634da959:34539 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-27T03:33:45,275 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-27T03:33:45,276 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-27T03:33:45,276 DEBUG [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-27T03:33:45,276 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-27T03:33:45,276 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-27T03:33:45,276 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-27T03:33:45,276 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T03:33:45,276 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T03:33:45,276 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-27T03:33:45,295 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/.tmp/ns/6315f42ee2bf4fbd8db6084c0d203dae is 43, key is default/ns:d/1732678425181/Put/seqid=0 2024-11-27T03:33:45,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741835_1011 (size=5153) 2024-11-27T03:33:45,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741835_1011 (size=5153) 2024-11-27T03:33:45,301 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/.tmp/ns/6315f42ee2bf4fbd8db6084c0d203dae 2024-11-27T03:33:45,311 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/.tmp/ns/6315f42ee2bf4fbd8db6084c0d203dae as hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/ns/6315f42ee2bf4fbd8db6084c0d203dae 2024-11-27T03:33:45,320 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/ns/6315f42ee2bf4fbd8db6084c0d203dae, entries=2, sequenceid=6, filesize=5.0 K 2024-11-27T03:33:45,322 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false 2024-11-27T03:33:45,322 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-27T03:33:45,328 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-27T03:33:45,329 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:33:45,329 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:33:45,329 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678425276Running coprocessor pre-close hooks at 1732678425276Disabling compacts and flushes for region at 1732678425276Disabling writes for close at 1732678425276Obtaining lock to block concurrent updates at 1732678425276Preparing flush snapshotting stores in 1588230740 at 1732678425276Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732678425276Flushing stores of hbase:meta,,1.1588230740 at 1732678425277 (+1 ms)Flushing 1588230740/ns: creating writer at 1732678425278 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732678425294 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732678425294Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42a4ccac: reopening flushed file at 1732678425310 (+16 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false at 1732678425322 (+12 ms)Writing region close event to WAL at 1732678425324 (+2 ms)Running coprocessor post-close hooks at 1732678425329 (+5 ms)Closed at 1732678425329 2024-11-27T03:33:45,330 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T03:33:45,476 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,34539,1732678424003; all regions closed. 
2024-11-27T03:33:45,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,477 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,477 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,477 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741834_1010 (size=1152) 2024-11-27T03:33:45,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741834_1010 (size=1152) 2024-11-27T03:33:45,499 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-27T03:33:45,499 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-27T03:33:45,890 DEBUG [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/oldWALs 2024-11-27T03:33:45,891 INFO [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C34539%2C1732678424003.meta:.meta(num 1732678425093) 2024-11-27T03:33:45,892 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,892 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,892 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:33:45,892 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,892 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:45,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741833_1009 (size=93) 2024-11-27T03:33:45,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741833_1009 (size=93) 2024-11-27T03:33:45,898 DEBUG [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/oldWALs 2024-11-27T03:33:45,898 INFO [RS:0;66d3634da959:34539 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C34539%2C1732678424003:(num 1732678424633) 2024-11-27T03:33:45,898 DEBUG [RS:0;66d3634da959:34539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:33:45,898 INFO [RS:0;66d3634da959:34539 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:33:45,898 INFO [RS:0;66d3634da959:34539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:33:45,899 INFO [RS:0;66d3634da959:34539 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-27T03:33:45,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:33:45,899 INFO [RS:0;66d3634da959:34539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:33:45,899 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-27T03:33:45,899 INFO [RS:0;66d3634da959:34539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34539 2024-11-27T03:33:45,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,34539,1732678424003 2024-11-27T03:33:45,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:33:45,947 INFO [RS:0;66d3634da959:34539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:33:45,957 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,34539,1732678424003] 2024-11-27T03:33:45,967 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,34539,1732678424003 already deleted, retry=false 2024-11-27T03:33:45,967 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,34539,1732678424003 expired; onlineServers=0 2024-11-27T03:33:45,967 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,33925,1732678423790' ***** 2024-11-27T03:33:45,967 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-27T03:33:45,967 INFO [M:0;66d3634da959:33925 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-27T03:33:45,968 INFO [M:0;66d3634da959:33925 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:33:45,968 DEBUG [M:0;66d3634da959:33925 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-27T03:33:45,968 DEBUG [M:0;66d3634da959:33925 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-27T03:33:45,968 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-27T03:33:45,968 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678424398 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678424398,5,FailOnTimeoutGroup] 2024-11-27T03:33:45,968 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678424398 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678424398,5,FailOnTimeoutGroup] 2024-11-27T03:33:45,968 INFO [M:0;66d3634da959:33925 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-27T03:33:45,968 INFO [M:0;66d3634da959:33925 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:33:45,968 DEBUG [M:0;66d3634da959:33925 {}] master.HMaster(1795): Stopping service threads 2024-11-27T03:33:45,968 INFO [M:0;66d3634da959:33925 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-27T03:33:45,968 INFO [M:0;66d3634da959:33925 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-27T03:33:45,969 INFO [M:0;66d3634da959:33925 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-27T03:33:45,969 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-27T03:33:45,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-27T03:33:45,978 DEBUG [M:0;66d3634da959:33925 {}] zookeeper.ZKUtil(347): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-27T03:33:45,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:33:45,978 WARN [M:0;66d3634da959:33925 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-27T03:33:45,979 INFO [M:0;66d3634da959:33925 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/.lastflushedseqids 2024-11-27T03:33:45,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741836_1012 (size=99) 2024-11-27T03:33:45,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741836_1012 (size=99) 2024-11-27T03:33:45,986 INFO [M:0;66d3634da959:33925 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-27T03:33:45,986 INFO [M:0;66d3634da959:33925 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-27T03:33:45,986 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T03:33:45,986 INFO [M:0;66d3634da959:33925 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:45,986 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:45,986 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T03:33:45,986 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:45,986 INFO [M:0;66d3634da959:33925 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-27T03:33:46,003 DEBUG [M:0;66d3634da959:33925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc77b02df3e44387834cef80f6ed8172 is 82, key is hbase:meta,,1/info:regioninfo/1732678425124/Put/seqid=0 2024-11-27T03:33:46,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741837_1013 (size=5672) 2024-11-27T03:33:46,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741837_1013 (size=5672) 2024-11-27T03:33:46,009 INFO [M:0;66d3634da959:33925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc77b02df3e44387834cef80f6ed8172 2024-11-27T03:33:46,030 DEBUG [M:0;66d3634da959:33925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c7dafd9b9e39466c8bb046f5cde06ecc is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732678425189/Put/seqid=0 2024-11-27T03:33:46,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741838_1014 (size=5275) 2024-11-27T03:33:46,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741838_1014 (size=5275) 2024-11-27T03:33:46,036 INFO [M:0;66d3634da959:33925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c7dafd9b9e39466c8bb046f5cde06ecc 2024-11-27T03:33:46,057 INFO [RS:0;66d3634da959:34539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:33:46,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:46,057 INFO [RS:0;66d3634da959:34539 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,34539,1732678424003; zookeeper connection closed. 
2024-11-27T03:33:46,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34539-0x1017a5a90890001, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:46,058 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@35d1d759 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@35d1d759 2024-11-27T03:33:46,058 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-27T03:33:46,060 DEBUG [M:0;66d3634da959:33925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a0c58003e514f64abd3e712735f2433 is 69, key is 66d3634da959,34539,1732678424003/rs:state/1732678424472/Put/seqid=0 2024-11-27T03:33:46,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741839_1015 (size=5156) 2024-11-27T03:33:46,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741839_1015 (size=5156) 2024-11-27T03:33:46,066 INFO [M:0;66d3634da959:33925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a0c58003e514f64abd3e712735f2433 2024-11-27T03:33:46,086 DEBUG [M:0;66d3634da959:33925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b5dead55e9c4f66adc20c79811e0b6d is 52, key is load_balancer_on/state:d/1732678425250/Put/seqid=0 2024-11-27T03:33:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741840_1016 (size=5056) 2024-11-27T03:33:46,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741840_1016 (size=5056) 2024-11-27T03:33:46,092 INFO [M:0;66d3634da959:33925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b5dead55e9c4f66adc20c79811e0b6d 2024-11-27T03:33:46,099 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc77b02df3e44387834cef80f6ed8172 as hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc77b02df3e44387834cef80f6ed8172 2024-11-27T03:33:46,106 INFO [M:0;66d3634da959:33925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc77b02df3e44387834cef80f6ed8172, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-27T03:33:46,108 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c7dafd9b9e39466c8bb046f5cde06ecc as hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c7dafd9b9e39466c8bb046f5cde06ecc 2024-11-27T03:33:46,116 INFO [M:0;66d3634da959:33925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c7dafd9b9e39466c8bb046f5cde06ecc, entries=3, sequenceid=29, filesize=5.2 K 2024-11-27T03:33:46,118 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a0c58003e514f64abd3e712735f2433 as hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a0c58003e514f64abd3e712735f2433 2024-11-27T03:33:46,125 INFO [M:0;66d3634da959:33925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a0c58003e514f64abd3e712735f2433, entries=1, sequenceid=29, filesize=5.0 K 2024-11-27T03:33:46,126 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2b5dead55e9c4f66adc20c79811e0b6d as hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b5dead55e9c4f66adc20c79811e0b6d 2024-11-27T03:33:46,134 INFO [M:0;66d3634da959:33925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39527/user/jenkins/test-data/808aff44-736b-7101-a580-146ac55be861/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2b5dead55e9c4f66adc20c79811e0b6d, entries=1, sequenceid=29, filesize=4.9 K 2024-11-27T03:33:46,135 INFO [M:0;66d3634da959:33925 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false 2024-11-27T03:33:46,137 INFO [M:0;66d3634da959:33925 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-27T03:33:46,137 DEBUG [M:0;66d3634da959:33925 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678425986Disabling compacts and flushes for region at 1732678425986Disabling writes for close at 1732678425986Obtaining lock to block concurrent updates at 1732678425986Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678425986Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732678425987 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678425988 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678425988Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678426003 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678426003Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678426015 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678426030 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678426030Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678426043 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678426060 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678426060Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678426072 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678426085 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678426085Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e69051e: reopening flushed file at 1732678426098 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c3d1fd5: reopening flushed file at 1732678426107 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d04605e: reopening flushed file at 1732678426116 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57e95fa0: reopening flushed file at 1732678426125 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=29, compaction requested=false at 1732678426136 (+11 ms)Writing region close event to WAL at 1732678426137 (+1 ms)Closed at 1732678426137 2024-11-27T03:33:46,138 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:46,138 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:46,138 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:46,138 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:46,138 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:33:46,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41361 is added to blk_1073741830_1006 (size=10311) 2024-11-27T03:33:46,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44499 is added to blk_1073741830_1006 (size=10311) 2024-11-27T03:33:46,141 INFO [M:0;66d3634da959:33925 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-27T03:33:46,141 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-27T03:33:46,141 INFO [M:0;66d3634da959:33925 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33925 2024-11-27T03:33:46,141 INFO [M:0;66d3634da959:33925 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:33:46,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:46,254 INFO [M:0;66d3634da959:33925 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:33:46,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33925-0x1017a5a90890000, quorum=127.0.0.1:56485, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:33:46,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da5059a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:33:46,261 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:33:46,261 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:33:46,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:33:46,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir/,STOPPED} 2024-11-27T03:33:46,264 WARN [BP-97074840-172.17.0.3-1732678421535 heartbeating to localhost/127.0.0.1:39527 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:33:46,264 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:33:46,265 WARN [BP-97074840-172.17.0.3-1732678421535 heartbeating to localhost/127.0.0.1:39527 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-97074840-172.17.0.3-1732678421535 (Datanode Uuid 97022696-7be4-4934-9bf4-c1a73e6a2ce1) service to localhost/127.0.0.1:39527 2024-11-27T03:33:46,265 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:33:46,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data3/current/BP-97074840-172.17.0.3-1732678421535 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:33:46,265 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data4/current/BP-97074840-172.17.0.3-1732678421535 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:33:46,265 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:33:46,267 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4595827f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:33:46,267 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:33:46,268 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:33:46,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:33:46,268 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir/,STOPPED} 2024-11-27T03:33:46,269 WARN [BP-97074840-172.17.0.3-1732678421535 heartbeating to localhost/127.0.0.1:39527 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:33:46,269 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:33:46,269 WARN [BP-97074840-172.17.0.3-1732678421535 heartbeating to localhost/127.0.0.1:39527 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-97074840-172.17.0.3-1732678421535 (Datanode Uuid b5332970-86ac-451a-9f97-a3aab13b1027) service to localhost/127.0.0.1:39527 2024-11-27T03:33:46,269 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:33:46,269 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data1/current/BP-97074840-172.17.0.3-1732678421535 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:33:46,270 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/cluster_8a5c93c5-6f29-f2a5-e810-2b065c71809b/data/data2/current/BP-97074840-172.17.0.3-1732678421535 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:33:46,270 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:33:46,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:33:46,276 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:33:46,277 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:33:46,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:33:46,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir/,STOPPED} 2024-11-27T03:33:46,282 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-27T03:33:46,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-27T03:33:46,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-27T03:33:46,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.log.dir so I do NOT create it in target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6 2024-11-27T03:33:46,300 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/59a307fa-090d-7b55-6ad6-8bebe08da773/hadoop.tmp.dir so I do NOT create it in target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6 2024-11-27T03:33:46,300 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d, deleteOnExit=true 2024-11-27T03:33:46,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-27T03:33:46,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/test.cache.data in system properties and HBase conf 2024-11-27T03:33:46,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.tmp.dir in system properties and HBase conf 2024-11-27T03:33:46,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir in system properties and HBase conf 2024-11-27T03:33:46,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-27T03:33:46,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-27T03:33:46,301 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-27T03:33:46,301 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-27T03:33:46,302 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/nfs.dump.dir in system properties and HBase conf 2024-11-27T03:33:46,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir in system properties and HBase conf 2024-11-27T03:33:46,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:33:46,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-27T03:33:46,303 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-27T03:33:46,316 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:33:46,412 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-27T03:33:46,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:33:46,428 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:33:46,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:33:46,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:33:46,496 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:33:46,623 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:33:46,629 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:33:46,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:33:46,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:33:46,630 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:33:46,631 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:33:46,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@692b8c40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:33:46,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab5393f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:33:46,726 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c461833{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-39425-hadoop-hdfs-3_4_1-tests_jar-_-any-5041256357395071010/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:33:46,726 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33e53d1d{HTTP/1.1, (http/1.1)}{localhost:39425} 2024-11-27T03:33:46,726 INFO [Time-limited test {}] server.Server(415): Started @107770ms 2024-11-27T03:33:46,738 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:33:47,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:33:47,011 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:33:47,012 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:33:47,012 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:33:47,012 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:33:47,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51561b8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:33:47,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@372d60ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:33:47,104 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@158b5ab3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-41877-hadoop-hdfs-3_4_1-tests_jar-_-any-2759084656478167674/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:33:47,105 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1495e1af{HTTP/1.1, (http/1.1)}{localhost:41877} 2024-11-27T03:33:47,105 INFO [Time-limited test {}] server.Server(415): Started @108149ms 2024-11-27T03:33:47,106 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:33:47,135 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:33:47,138 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:33:47,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:33:47,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:33:47,139 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:33:47,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42443481{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:33:47,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@144f8866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:33:47,236 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24c93faa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-43559-hadoop-hdfs-3_4_1-tests_jar-_-any-9122029196886193034/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:33:47,237 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e8113bb{HTTP/1.1, (http/1.1)}{localhost:43559} 2024-11-27T03:33:47,237 INFO [Time-limited test {}] server.Server(415): Started @108281ms 2024-11-27T03:33:47,238 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:33:48,494 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data1/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:33:48,494 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data2/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:33:48,518 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:33:48,521 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5178b98b3b94de3d with lease ID 0xdc375885b0dc55b2: Processing first storage report for DS-c89509ba-24d7-4de6-84b6-ec35f438cb00 from datanode DatanodeRegistration(127.0.0.1:46751, datanodeUuid=107e5a40-1a85-480e-a4dd-496ab3153acb, infoPort=34777, infoSecurePort=0, ipcPort=40081, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:33:48,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5178b98b3b94de3d with lease ID 0xdc375885b0dc55b2: from storage DS-c89509ba-24d7-4de6-84b6-ec35f438cb00 node DatanodeRegistration(127.0.0.1:46751, datanodeUuid=107e5a40-1a85-480e-a4dd-496ab3153acb, infoPort=34777, infoSecurePort=0, ipcPort=40081, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:33:48,521 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5178b98b3b94de3d with lease ID 0xdc375885b0dc55b2: Processing first storage report for DS-99a2646b-5b45-4150-aa7c-6d397f4fa926 from datanode DatanodeRegistration(127.0.0.1:46751, datanodeUuid=107e5a40-1a85-480e-a4dd-496ab3153acb, infoPort=34777, infoSecurePort=0, ipcPort=40081, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:33:48,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5178b98b3b94de3d with lease ID 0xdc375885b0dc55b2: from storage DS-99a2646b-5b45-4150-aa7c-6d397f4fa926 node DatanodeRegistration(127.0.0.1:46751, datanodeUuid=107e5a40-1a85-480e-a4dd-496ab3153acb, infoPort=34777, infoSecurePort=0, ipcPort=40081, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:33:48,630 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data3/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:33:48,633 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data4/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:33:48,649 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:33:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7affee0856508a4b with lease ID 0xdc375885b0dc55b3: Processing first storage report for DS-6ef65769-1808-486b-900d-c1a8593676a8 from datanode DatanodeRegistration(127.0.0.1:41621, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=46611, infoSecurePort=0, ipcPort=45269, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:33:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7affee0856508a4b with lease ID 0xdc375885b0dc55b3: from storage DS-6ef65769-1808-486b-900d-c1a8593676a8 node DatanodeRegistration(127.0.0.1:41621, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=46611, infoSecurePort=0, ipcPort=45269, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:33:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7affee0856508a4b with lease ID 0xdc375885b0dc55b3: Processing first storage report for DS-d56a45d7-5d5e-4eec-a8b2-9911548cdd60 from datanode DatanodeRegistration(127.0.0.1:41621, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=46611, infoSecurePort=0, ipcPort=45269, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:33:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7affee0856508a4b with lease ID 0xdc375885b0dc55b3: from storage DS-d56a45d7-5d5e-4eec-a8b2-9911548cdd60 node DatanodeRegistration(127.0.0.1:41621, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=46611, infoSecurePort=0, ipcPort=45269, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:33:48,684 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6 2024-11-27T03:33:48,689 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/zookeeper_0, clientPort=60759, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-27T03:33:48,690 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60759 2024-11-27T03:33:48,691 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,693 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:33:48,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:33:48,706 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4 with version=8 2024-11-27T03:33:48,706 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging 2024-11-27T03:33:48,710 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-27T03:33:48,710 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:33:48,711 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45173 2024-11-27T03:33:48,713 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45173 connecting to ZooKeeper ensemble=127.0.0.1:60759 2024-11-27T03:33:48,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:451730x0, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:33:48,769 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45173-0x1017a5aa3ba0000 connected 2024-11-27T03:33:48,852 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,853 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,855 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:33:48,856 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4, hbase.cluster.distributed=false 2024-11-27T03:33:48,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:33:48,858 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45173 2024-11-27T03:33:48,858 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45173 2024-11-27T03:33:48,858 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45173 2024-11-27T03:33:48,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45173 2024-11-27T03:33:48,859 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45173 2024-11-27T03:33:48,873 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:33:48,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:33:48,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:33:48,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:33:48,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:33:48,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:33:48,874 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T03:33:48,874 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:33:48,874 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38459 2024-11-27T03:33:48,876 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38459 connecting to ZooKeeper ensemble=127.0.0.1:60759 2024-11-27T03:33:48,876 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384590x0, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:33:48,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:33:48,894 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38459-0x1017a5aa3ba0001 connected 2024-11-27T03:33:48,894 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-27T03:33:48,895 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-27T03:33:48,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T03:33:48,897 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:33:48,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38459 2024-11-27T03:33:48,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38459 2024-11-27T03:33:48,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38459 2024-11-27T03:33:48,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38459 2024-11-27T03:33:48,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38459 2024-11-27T03:33:48,914 DEBUG [M:0;66d3634da959:45173 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:45173 2024-11-27T03:33:48,914 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,45173,1732678428709 2024-11-27T03:33:48,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:33:48,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:33:48,926 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,45173,1732678428709 2024-11-27T03:33:48,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:33:48,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T03:33:48,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:33:48,936 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T03:33:48,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,45173,1732678428709 from backup master directory 2024-11-27T03:33:48,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:33:48,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,45173,1732678428709 2024-11-27T03:33:48,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:33:48,946 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
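[editor's note] The backup-masters entries above and the "Registered as active master" entry below reflect ZooKeeper's ephemeral-znode election pattern: the master announces itself under /hbase/backup-masters, races to create /hbase/master, and on winning deletes its backup entry. HBase's real logic lives in ActiveMasterManager; the following sketch only illustrates the generic pattern with the plain ZooKeeper client, reusing the quorum address and znode paths from the log:

// Generic ephemeral-znode election sketch; not HBase's actual implementation.
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60759", 30000, event -> { });
    byte[] id = "66d3634da959,45173,1732678428709".getBytes("UTF-8");

    // 1. Announce ourselves as a backup master (ephemeral: vanishes if we crash).
    String backup = "/hbase/backup-masters/66d3634da959,45173,1732678428709";
    zk.create(backup, id, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    try {
      // 2. Race to create the active-master znode.
      zk.create("/hbase/master", id, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      // 3. Won the race: drop the backup entry ("Deleting ZNode ... from backup
      //    master directory" in the log) and proceed as the active master.
      zk.delete(backup, -1);
    } catch (KeeperException.NodeExistsException e) {
      // Lost the race: remain a backup master and watch /hbase/master instead.
    }
  }
}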
2024-11-27T03:33:48,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,45173,1732678428709 2024-11-27T03:33:48,951 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/hbase.id] with ID: b0edcb6b-bbff-4a57-9549-21fdf6b851be 2024-11-27T03:33:48,951 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/.tmp/hbase.id 2024-11-27T03:33:48,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:33:48,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:33:48,958 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/.tmp/hbase.id]:[hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/hbase.id] 2024-11-27T03:33:48,973 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:33:48,973 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-27T03:33:48,975 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
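[editor's note] The FSUtils(620/625/634) entries above show the cluster ID being written to .tmp/hbase.id and then moved to hbase.id, a write-to-temp-then-rename publish so readers only ever see a complete file. A sketch of that pattern with the Hadoop FileSystem API, assuming fs.defaultFS points at the test HDFS; paths and the UUID are illustrative:

import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // 1. Write the ID somewhere readers never look.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // 2. Move it into place in one step; a crash before this leaves no partial file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("failed to publish " + target);
    }
  }
}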
2024-11-27T03:33:48,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:33:48,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:33:48,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741827_1003 (size=196) 2024-11-27T03:33:48,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741827_1003 (size=196) 2024-11-27T03:33:48,994 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T03:33:48,995 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-27T03:33:48,995 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:33:49,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741828_1004 (size=1189) 2024-11-27T03:33:49,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741828_1004 (size=1189) 2024-11-27T03:33:49,006 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store 2024-11-27T03:33:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741829_1005 (size=34) 2024-11-27T03:33:49,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741829_1005 (size=34) 2024-11-27T03:33:49,014 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:33:49,014 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T03:33:49,014 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:49,014 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:49,014 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T03:33:49,014 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:33:49,015 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
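[editor's note] The 'master:store' descriptor printed above (families info, proc, rs, state) is built internally by the master region, but the same shape can be expressed with the public client API. A sketch for illustration only: the 'info' family carries the non-default attributes from the log (3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks), while 'proc', 'rs', and 'state' match the family defaults (1 version, ROW bloom, 64 KB blocks):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info': the only family with non-default settings in the logged descriptor
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs', 'state': family defaults, as in the log
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}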
2024-11-27T03:33:49,015 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678429014Disabling compacts and flushes for region at 1732678429014Disabling writes for close at 1732678429014Writing region close event to WAL at 1732678429014Closed at 1732678429014 2024-11-27T03:33:49,016 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/.initializing 2024-11-27T03:33:49,016 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709 2024-11-27T03:33:49,019 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C45173%2C1732678428709, suffix=, logDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709, archiveDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/oldWALs, maxLogs=10 2024-11-27T03:33:49,019 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C45173%2C1732678428709.1732678429019 2024-11-27T03:33:49,024 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 2024-11-27T03:33:49,025 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34777:34777),(127.0.0.1/127.0.0.1:46611:46611)] 2024-11-27T03:33:49,026 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-27T03:33:49,026 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:33:49,026 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T03:33:49,026 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-27T03:33:49,027 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T03:33:49,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-27T03:33:49,029 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:33:49,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:33:49,029 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T03:33:49,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-27T03:33:49,031 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:33:49,031 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T03:33:49,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-27T03:33:49,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:33:49,033 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:33:49,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:33:49,036 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:33:49,036 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,037 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,038 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,039 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,039 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,040 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:33:49,041 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:33:49,043 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:33:49,043 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798003, jitterRate=0.014713898301124573}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:33:49,044 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678429026Initializing all the Stores at 1732678429027 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429027Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678429027Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678429027Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678429027Cleaning up temporary data from old regions at 1732678429039 (+12 ms)Region opened successfully at 1732678429044 (+5 ms)
2024-11-27T03:33:49,045 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-27T03:33:49,048 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a41755, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:33:49,049 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:33:49,049 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:33:49,049 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:33:49,049 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:33:49,050 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-27T03:33:49,050 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-27T03:33:49,050 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:33:49,053 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:33:49,054 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:33:49,062 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:33:49,062 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:33:49,063 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:33:49,072 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:33:49,073 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:33:49,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:33:49,083 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:33:49,084 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:33:49,093 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:33:49,098 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:33:49,104 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:33:49,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:49,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:33:49,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,116 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,45173,1732678428709, sessionid=0x1017a5aa3ba0000, setting cluster-up flag (Was=false)
2024-11-27T03:33:49,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,167 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:33:49,169 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,45173,1732678428709
2024-11-27T03:33:49,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,220 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:33:49,222 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,45173,1732678428709
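The ZKUtil/RecoverableZooKeeper records above show the master probing optional znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/*, /hbase/snapshot-cleanup) and treating their absence as normal, while the ZKWatcher records show watch notifications (NodeCreated, NodeChildrenChanged) delivered for paths under the base znode. A minimal sketch of the same probe-and-watch pattern with the plain Apache ZooKeeper client follows; it is illustrative only, does not use HBase's internal ZKUtil API, and borrows only the quorum address and znode path from the log.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbe {
        public static void main(String[] args) throws Exception {
            // Session against the test quorum seen in the records above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60759", 30_000, event -> { });
            // exists() returns null when the znode is absent -- the "not
            // necessarily an error" case logged for /hbase/balancer -- and
            // registers a one-shot watch that fires (e.g. NodeCreated) if
            // the znode appears later.
            Stat stat = zk.exists("/hbase/balancer",
                (WatchedEvent e) -> System.out.println(e.getType() + " " + e.getPath()));
            System.out.println(stat == null ? "/hbase/balancer absent" : "/hbase/balancer present");
            zk.close();
        }
    }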
2024-11-27T03:33:49,224 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-27T03:33:49,226 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:49,227 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-27T03:33:49,227 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-27T03:33:49,227 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,45173,1732678428709 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-27T03:33:49,229 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:49,229 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:49,229 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:49,230 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:33:49,230 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10
2024-11-27T03:33:49,230 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,230 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:33:49,230 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,231 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678459231
2024-11-27T03:33:49,231 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-27T03:33:49,231 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-27T03:33:49,231 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-27T03:33:49,231 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-27T03:33:49,231 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-27T03:33:49,232 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-27T03:33:49,232 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,232 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-27T03:33:49,232 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-27T03:33:49,232 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:49,233 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-27T03:33:49,233 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-27T03:33:49,233 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-27T03:33:49,233 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-27T03:33:49,233 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678429233,5,FailOnTimeoutGroup]
2024-11-27T03:33:49,234 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678429233,5,FailOnTimeoutGroup]
2024-11-27T03:33:49,234 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,234 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-27T03:33:49,234 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,234 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,234 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,234 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-27T03:33:49,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:33:49,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:33:49,245 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-27T03:33:49,245 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4
2024-11-27T03:33:49,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:33:49,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:33:49,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:49,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:33:49,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:33:49,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:33:49,259 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:33:49,259 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:33:49,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:33:49,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:33:49,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:33:49,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,264 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:33:49,265 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740
2024-11-27T03:33:49,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740
2024-11-27T03:33:49,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:33:49,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:33:49,268 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
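The two FlushLargeStoresPolicy(65) records (32.0 M for master:store earlier, 16.0 M for hbase:meta just above) follow from the fallback the message describes: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the lower bound is the region's memstore flush heap size divided by the number of column families. A sketch of that arithmetic for master:store, using only numbers visible in this log (flushSize=134217728 from MasterRegionFlusherAndCompactor, four families info/proc/rs/state):

    public class FlushLowerBound {
        public static void main(String[] args) {
            long memstoreFlushHeapSize = 134217728L; // 128 MB, as logged
            int columnFamilies = 4;                  // info, proc, rs, state
            long lowerBound = memstoreFlushHeapSize / columnFamilies;
            // Prints 33554432, i.e. the logged "(32.0 M)" fallback, matching
            // FlushLargeStoresPolicy{flushSizeLowerBound=33554432} at region open.
            System.out.println(lowerBound);
        }
    }

The hbase:meta case (16.0 M over its four families) implies a 64 MB flush heap size for that region; the log does not print that intermediate value directly.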
2024-11-27T03:33:49,269 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:33:49,271 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:33:49,271 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724056, jitterRate=-0.07931597530841827}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:33:49,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678429255Initializing all the Stores at 1732678429255Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429256 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429256Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678429256Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429256Cleaning up temporary data from old regions at 1732678429267 (+11 ms)Region opened successfully at 1732678429272 (+5 ms)
2024-11-27T03:33:49,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:33:49,272 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:33:49,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:33:49,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:33:49,272 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:33:49,273 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:33:49,273 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678429272Disabling compacts and flushes for region at 1732678429272Disabling writes for close at 1732678429272Writing region close event to WAL at 1732678429273 (+1 ms)Closed at 1732678429273
2024-11-27T03:33:49,274 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:49,274 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-27T03:33:49,275 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-27T03:33:49,276 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:33:49,277 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-27T03:33:49,306 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(746): ClusterId : b0edcb6b-bbff-4a57-9549-21fdf6b851be
2024-11-27T03:33:49,307 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-27T03:33:49,317 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-27T03:33:49,317 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-27T03:33:49,328 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-27T03:33:49,329 DEBUG [RS:0;66d3634da959:38459 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62b6da10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:33:49,344 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:38459
2024-11-27T03:33:49,344 INFO [RS:0;66d3634da959:38459 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-27T03:33:49,344 INFO [RS:0;66d3634da959:38459 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-27T03:33:49,344 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-27T03:33:49,345 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,45173,1732678428709 with port=38459, startcode=1732678428873
2024-11-27T03:33:49,345 DEBUG [RS:0;66d3634da959:38459 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-27T03:33:49,347 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59767, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-11-27T03:33:49,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45173 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,38459,1732678428873
2024-11-27T03:33:49,347 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45173 {}] master.ServerManager(517): Registering regionserver=66d3634da959,38459,1732678428873
2024-11-27T03:33:49,349 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4
2024-11-27T03:33:49,349 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44897
2024-11-27T03:33:49,349 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-27T03:33:49,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:33:49,357 DEBUG [RS:0;66d3634da959:38459 {}] zookeeper.ZKUtil(111): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,38459,1732678428873
2024-11-27T03:33:49,357 WARN [RS:0;66d3634da959:38459 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:33:49,357 INFO [RS:0;66d3634da959:38459 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:33:49,357 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873
2024-11-27T03:33:49,358 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,38459,1732678428873]
2024-11-27T03:33:49,361 INFO [RS:0;66d3634da959:38459 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-27T03:33:49,364 INFO [RS:0;66d3634da959:38459 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-27T03:33:49,365 INFO [RS:0;66d3634da959:38459 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:33:49,365 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
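The PressureAwareCompactionThroughputController record above reports a 100 MB/s upper and 50 MB/s lower compaction throughput bound with a 60 s tuning period. Those bounds are configurable; the key names in the sketch below are my assumption from recent HBase releases (verify against your version's PressureAwareCompactionThroughputController), not something this log states. The values mirror the logged bounds.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughput {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names; byte values match the 100/50 MB/s bounds above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println(conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0));
        }
    }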
2024-11-27T03:33:49,365 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-27T03:33:49,366 INFO [RS:0;66d3634da959:38459 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-27T03:33:49,366 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,367 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,368 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,368 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:49,368 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:33:49,368 DEBUG [RS:0;66d3634da959:38459 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:33:49,368 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,368 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,369 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,369 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,369 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,369 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38459,1732678428873-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:33:49,387 INFO [RS:0;66d3634da959:38459 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-27T03:33:49,388 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38459,1732678428873-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,388 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,388 INFO [RS:0;66d3634da959:38459 {}] regionserver.Replication(171): 66d3634da959,38459,1732678428873 started
2024-11-27T03:33:49,400 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:49,401 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,38459,1732678428873, RpcServer on 66d3634da959/172.17.0.3:38459, sessionid=0x1017a5aa3ba0001
2024-11-27T03:33:49,401 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-27T03:33:49,401 DEBUG [RS:0;66d3634da959:38459 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,38459,1732678428873
2024-11-27T03:33:49,401 DEBUG [RS:0;66d3634da959:38459 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,38459,1732678428873'
2024-11-27T03:33:49,401 DEBUG [RS:0;66d3634da959:38459 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-27T03:33:49,402 DEBUG [RS:0;66d3634da959:38459 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-27T03:33:49,402 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-27T03:33:49,402 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-27T03:33:49,402 DEBUG [RS:0;66d3634da959:38459 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,38459,1732678428873
2024-11-27T03:33:49,402 DEBUG [RS:0;66d3634da959:38459 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,38459,1732678428873'
2024-11-27T03:33:49,402 DEBUG [RS:0;66d3634da959:38459 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-27T03:33:49,403 DEBUG [RS:0;66d3634da959:38459 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-27T03:33:49,403 DEBUG [RS:0;66d3634da959:38459 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-27T03:33:49,404 INFO [RS:0;66d3634da959:38459 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-27T03:33:49,404 INFO [RS:0;66d3634da959:38459 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-27T03:33:49,428 WARN [66d3634da959:45173 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-27T03:33:49,508 INFO [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C38459%2C1732678428873, suffix=, logDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873, archiveDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs, maxLogs=32
2024-11-27T03:33:49,510 INFO [RS:0;66d3634da959:38459 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:33:49,520 INFO [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:33:49,521 DEBUG [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46611:46611),(127.0.0.1/127.0.0.1:34777:34777)]
2024-11-27T03:33:49,678 DEBUG [66d3634da959:45173 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-27T03:33:49,679 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,38459,1732678428873
2024-11-27T03:33:49,683 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,38459,1732678428873, state=OPENING
2024-11-27T03:33:49,694 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-27T03:33:49,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:33:49,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:49,781 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:33:49,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:49,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,38459,1732678428873}]
2024-11-27T03:33:49,937 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-27T03:33:49,944 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37421, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-27T03:33:49,948 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-27T03:33:49,949 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:33:49,951 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C38459%2C1732678428873.meta, suffix=.meta, logDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873, archiveDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs, maxLogs=32
2024-11-27T03:33:49,952 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:33:49,960 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:33:49,962 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34777:34777),(127.0.0.1/127.0.0.1:46611:46611)]
2024-11-27T03:33:49,963 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:33:49,963 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-27T03:33:49,963 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-27T03:33:49,963 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
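Both wal.AbstractFSWAL(613) records above (the region server's WAL and the meta WAL) report blocksize=256 MB, rollsize=128 MB, maxLogs=32. The roll size is the WAL block size scaled by a roll multiplier, so the logged pair is consistent with a 0.5 multiplier. A sketch of that arithmetic; the multiplier value and the key names mentioned in the comments (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are assumed from common HBase configuration, not stated in this log:

    public class WalRollSize {
        public static void main(String[] args) {
            // blocksize=256 MB as logged (hbase.regionserver.hlog.blocksize).
            long blocksize = 256L * 1024 * 1024;
            // Assumed roll multiplier (hbase.regionserver.logroll.multiplier).
            double rollMultiplier = 0.5;
            long rollsize = (long) (blocksize * rollMultiplier);
            // Prints 134217728, i.e. the logged rollsize of 128 MB; maxLogs=32
            // (hbase.regionserver.maxlogs) caps how many such files accumulate.
            System.out.println(rollsize);
        }
    }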
2024-11-27T03:33:49,964 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-27T03:33:49,964 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:49,964 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-27T03:33:49,964 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-27T03:33:49,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:33:49,968 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:33:49,968 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,969 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,969 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:33:49,970 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:33:49,970 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,971 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:33:49,972 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:33:49,972 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:33:49,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:33:49,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:33:49,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:49,974 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
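Each CompactionConfiguration(183) record above prints the same selection parameters per column family: minCompactSize 128 MB, files [3, 10), ratio 1.2, off-peak ratio 5.0. For reference, a sketch mapping those logged values to the configuration keys they usually come from; the key names are my assumption (check CompactionConfiguration in your HBase version), while the values are the ones logged:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelection {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);   // ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F); // off-peak ratio
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
            System.out.println(conf.get("hbase.hstore.compaction.ratio"));
        }
    }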
2024-11-27T03:33:49,974 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:33:49,975 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740
2024-11-27T03:33:49,976 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740
2024-11-27T03:33:49,978 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:33:49,978 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:33:49,978 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:33:49,980 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:33:49,981 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857047, jitterRate=0.08979195356369019}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:33:49,981 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-27T03:33:49,982 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732678429964Writing region info on filesystem at 1732678429964Initializing all the Stores at 1732678429966 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429966Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429966Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678429966Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678429966Cleaning up temporary data from old regions at 1732678429978 (+12 ms)Running coprocessor post-open hooks at 1732678429981 (+3 ms)Region opened successfully at 1732678429981
2024-11-27T03:33:49,983 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678429937
2024-11-27T03:33:49,986 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-27T03:33:49,986 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-27T03:33:49,987 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,38459,1732678428873
2024-11-27T03:33:49,988 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,38459,1732678428873, state=OPEN
2024-11-27T03:33:50,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:33:50,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:33:50,029 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,38459,1732678428873
2024-11-27T03:33:50,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:50,029 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:33:50,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-27T03:33:50,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,38459,1732678428873 in 248 msec
2024-11-27T03:33:50,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-27T03:33:50,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 757 msec
2024-11-27T03:33:50,037 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:33:50,038 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-27T03:33:50,039 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:33:50,039 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,38459,1732678428873, seqNum=-1]
2024-11-27T03:33:50,039 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:33:50,041 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55329, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:33:50,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 821 msec
2024-11-27T03:33:50,048 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678430048, completionTime=-1
2024-11-27T03:33:50,048 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-27T03:33:50,048 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-11-27T03:33:50,050 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-11-27T03:33:50,050 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678490050
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678550050
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45173,1732678428709-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45173,1732678428709-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45173,1732678428709-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:45173, period=300000, unit=MILLISECONDS is enabled.
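Note: InitMetaProcedure's "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces" step seeds the two built-in namespaces. A hedged sketch of verifying them from a client once the minicluster is up; the Configuration here is assumed to point at this cluster and is not taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for this cluster
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // After InitMetaProcedure finishes, 'default' and 'hbase' should both be listed.
                for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
                    System.out.println(ns.getName());
                }
            }
        }
    }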
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,051 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,054 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-27T03:33:50,056 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.110sec
2024-11-27T03:33:50,056 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-27T03:33:50,056 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-27T03:33:50,056 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-27T03:33:50,057 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-27T03:33:50,057 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-27T03:33:50,057 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45173,1732678428709-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:33:50,057 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45173,1732678428709-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-27T03:33:50,059 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-27T03:33:50,059 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-27T03:33:50,059 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45173,1732678428709-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
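Note: the ChoreService entries above register periodic maintenance tasks (balancer, catalog janitor, mob cleaners, ...) by name and period. For reference, a minimal ScheduledChore subclass; the chore name and 60-second period here are invented for illustration:

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // ChoreService invokes chore() every `period` milliseconds until the
    // Stoppable it is bound to reports that it has stopped.
    public class HeartbeatChore extends ScheduledChore {
        public HeartbeatChore(Stoppable stopper) {
            super("HeartbeatChore", stopper, 60000); // hypothetical name and period
        }

        @Override
        protected void chore() {
            System.out.println("chore fired at " + System.currentTimeMillis());
        }
    }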
2024-11-27T03:33:50,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@142e8458, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:33:50,107 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,45173,-1 for getting cluster id
2024-11-27T03:33:50,107 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-27T03:33:50,109 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b0edcb6b-bbff-4a57-9549-21fdf6b851be'
2024-11-27T03:33:50,110 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-27T03:33:50,110 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b0edcb6b-bbff-4a57-9549-21fdf6b851be"
2024-11-27T03:33:50,110 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e7809cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:33:50,111 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,45173,-1]
2024-11-27T03:33:50,111 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-27T03:33:50,111 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:33:50,113 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55806, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-27T03:33:50,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ee117d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:33:50,115 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:33:50,116 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,38459,1732678428873, seqNum=-1]
2024-11-27T03:33:50,116 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:33:50,118 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53614, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:33:50,120 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=66d3634da959,45173,1732678428709
2024-11-27T03:33:50,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:50,126 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-27T03:33:50,169 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-27T03:33:50,169 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:33:50,170 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40479
2024-11-27T03:33:50,171 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40479 connecting to ZooKeeper ensemble=127.0.0.1:60759
2024-11-27T03:33:50,172 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:50,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:33:50,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404790x0, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:33:50,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:404790x0, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on existing znode=/hbase/running
2024-11-27T03:33:50,197 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT
2024-11-27T03:33:50,197 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40479-0x1017a5aa3ba0002 connected
2024-11-27T03:33:50,198 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-27T03:33:50,198 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-27T03:33:50,199 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-27T03:33:50,201 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:33:50,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40479
2024-11-27T03:33:50,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40479
2024-11-27T03:33:50,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40479
2024-11-27T03:33:50,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40479
2024-11-27T03:33:50,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40479
2024-11-27T03:33:50,205 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(746): ClusterId : b0edcb6b-bbff-4a57-9549-21fdf6b851be
2024-11-27T03:33:50,205 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-27T03:33:50,218 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-27T03:33:50,218 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-27T03:33:50,232 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-27T03:33:50,232 DEBUG [RS:1;66d3634da959:40479 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20b24d16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:33:50,247 DEBUG [RS:1;66d3634da959:40479 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;66d3634da959:40479
2024-11-27T03:33:50,247 INFO [RS:1;66d3634da959:40479 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-27T03:33:50,247 INFO [RS:1;66d3634da959:40479 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-27T03:33:50,247 DEBUG [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(832): About to register with Master.
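Note: the ZKWatcher/ZKUtil entries above show the new regionserver's session (0x1017a5aa3ba0002) connecting to the 127.0.0.1:60759 ensemble and setting watches on znodes such as /hbase/running and /hbase/master. A bare-bones sketch of the same watch pattern using the plain Apache ZooKeeper client; the ensemble address and znode path are taken from the log, everything else is illustrative:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
        public static void main(String[] args) throws Exception {
            // Connect to the ensemble the log shows (quorum=127.0.0.1:60759).
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60759", 30000,
                (WatchedEvent event) ->
                    System.out.println("event " + event.getType() + " on " + event.getPath()));
            // Set a watch on an existing znode, as ZKUtil(111) logs for /hbase/running;
            // a NodeDataChanged/NodeDeleted event will fire the watcher above once.
            zk.exists("/hbase/running", true);
            Thread.sleep(5000); // wait briefly for events in this toy example
            zk.close();
        }
    }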
2024-11-27T03:33:50,248 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,45173,1732678428709 with port=40479, startcode=1732678430168
2024-11-27T03:33:50,248 DEBUG [RS:1;66d3634da959:40479 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-27T03:33:50,250 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48105, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-11-27T03:33:50,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45173 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,40479,1732678430168
2024-11-27T03:33:50,250 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45173 {}] master.ServerManager(517): Registering regionserver=66d3634da959,40479,1732678430168
2024-11-27T03:33:50,252 DEBUG [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4
2024-11-27T03:33:50,252 DEBUG [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44897
2024-11-27T03:33:50,252 DEBUG [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-27T03:33:50,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:33:50,263 DEBUG [RS:1;66d3634da959:40479 {}] zookeeper.ZKUtil(111): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,40479,1732678430168
2024-11-27T03:33:50,263 WARN [RS:1;66d3634da959:40479 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:33:50,263 INFO [RS:1;66d3634da959:40479 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:33:50,263 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,40479,1732678430168]
2024-11-27T03:33:50,263 DEBUG [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168
2024-11-27T03:33:50,268 INFO [RS:1;66d3634da959:40479 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-27T03:33:50,272 INFO [RS:1;66d3634da959:40479 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-27T03:33:50,272 INFO [RS:1;66d3634da959:40479 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:33:50,272 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,273 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-27T03:33:50,274 INFO [RS:1;66d3634da959:40479 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-27T03:33:50,274 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:33:50,274 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:33:50,275 DEBUG [RS:1;66d3634da959:40479 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:33:50,276 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
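Note: each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" entry above is a dedicated handler pool for one event type (region open/close, log replay, snapshots, flushes). The equivalent effect in plain java.util.concurrent terms, sized like the RS_SNAPSHOT_OPERATIONS pool (core=max=3); the task submitted is invented for illustration:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class HandlerPoolSketch {
        public static void main(String[] args) throws Exception {
            // corePoolSize=3, maxPoolSize=3 behaves like a fixed pool of 3 threads.
            ExecutorService pool = Executors.newFixedThreadPool(3);
            for (int i = 0; i < 5; i++) {
                final int id = i;
                pool.submit(() -> System.out.println("handling event " + id));
            }
            pool.shutdown();
            pool.awaitTermination(10, TimeUnit.SECONDS);
        }
    }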
2024-11-27T03:33:50,276 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,276 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,276 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,276 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,276 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,40479,1732678430168-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:33:50,291 INFO [RS:1;66d3634da959:40479 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-27T03:33:50,291 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,40479,1732678430168-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,291 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,291 INFO [RS:1;66d3634da959:40479 {}] regionserver.Replication(171): 66d3634da959,40479,1732678430168 started
2024-11-27T03:33:50,303 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:33:50,303 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,40479,1732678430168, RpcServer on 66d3634da959/172.17.0.3:40479, sessionid=0x1017a5aa3ba0002
2024-11-27T03:33:50,304 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-27T03:33:50,304 DEBUG [RS:1;66d3634da959:40479 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,40479,1732678430168
2024-11-27T03:33:50,304 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;66d3634da959:40479,5,FailOnTimeoutGroup]
2024-11-27T03:33:50,304 DEBUG [RS:1;66d3634da959:40479 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,40479,1732678430168'
2024-11-27T03:33:50,304 DEBUG [RS:1;66d3634da959:40479 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-27T03:33:50,304 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2
2024-11-27T03:33:50,304 DEBUG [RS:1;66d3634da959:40479 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-27T03:33:50,304 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-27T03:33:50,305 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-27T03:33:50,305 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-27T03:33:50,305 DEBUG [RS:1;66d3634da959:40479 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,40479,1732678430168
2024-11-27T03:33:50,305 DEBUG [RS:1;66d3634da959:40479 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,40479,1732678430168'
2024-11-27T03:33:50,305 DEBUG [RS:1;66d3634da959:40479 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-27T03:33:50,306 DEBUG [RS:1;66d3634da959:40479 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-27T03:33:50,306 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 66d3634da959,45173,1732678428709
2024-11-27T03:33:50,306 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@9d3ec26
2024-11-27T03:33:50,306 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-27T03:33:50,306 DEBUG [RS:1;66d3634da959:40479 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-27T03:33:50,306 INFO [RS:1;66d3634da959:40479 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-27T03:33:50,306 INFO [RS:1;66d3634da959:40479 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-27T03:33:50,308 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55820, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-27T03:33:50,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-27T03:33:50,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
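Note: the two TableDescriptorChecker warnings fire because this run uses deliberately tiny limits (hbase.hregion.max.filesize=786432 bytes, hbase.hregion.memstore.flush.size=8192 bytes), evidently so the test flushes and rolls quickly. A sketch of how such per-table limits are expressed with the public TableDescriptorBuilder API, with the values copied from the warnings; the class name is invented:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallRegionTableSketch {
        public static TableDescriptor build() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                .setMaxFileSize(786432)      // triggers the MAX_FILESIZE warning above
                .setMemStoreFlushSize(8192)  // triggers the MEMSTORE_FLUSHSIZE warning above
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
                .build();
        }
    }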
2024-11-27T03:33:50,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:33:50,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath
2024-11-27T03:33:50,312 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION
2024-11-27T03:33:50,312 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:50,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4
2024-11-27T03:33:50,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:33:50,313 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-27T03:33:50,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741835_1011 (size=393)
2024-11-27T03:33:50,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741835_1011 (size=393)
2024-11-27T03:33:50,323 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 30ae0c164627c581508f27dd9b02481f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4
2024-11-27T03:33:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46751 is added to blk_1073741836_1012 (size=76)
2024-11-27T03:33:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41621 is added to blk_1073741836_1012 (size=76)
2024-11-27T03:33:50,330 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:50,330 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 30ae0c164627c581508f27dd9b02481f, disabling compactions & flushes
2024-11-27T03:33:50,330 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,330 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,330 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f. after waiting 0 ms
2024-11-27T03:33:50,331 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,331 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,331 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 30ae0c164627c581508f27dd9b02481f: Waiting for close lock at 1732678430330Disabling compacts and flushes for region at 1732678430330Disabling writes for close at 1732678430331 (+1 ms)Writing region close event to WAL at 1732678430331Closed at 1732678430331
2024-11-27T03:33:50,332 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META
2024-11-27T03:33:50,333 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732678430332"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678430332"}]},"ts":"1732678430332"}
2024-11-27T03:33:50,335 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
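Note: the CreateTableProcedure entries above walk the state machine CREATE_TABLE_PRE_OPERATION -> WRITE_FS_LAYOUT -> ADD_TO_META, with MetaTableAccessor writing the regioninfo and state cells into hbase:meta. From the client side all of this is driven by a single Admin call; a hedged sketch reusing the hypothetical descriptor from the earlier SmallRegionTableSketch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Blocks until the server-side CreateTableProcedure (pid=4 in this log)
                // completes; the client polls the master, matching the repeated
                // "Checking to see if procedure is done pid=4" entries.
                admin.createTable(SmallRegionTableSketch.build());
            }
        }
    }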
2024-11-27T03:33:50,336 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-27T03:33:50,337 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678430337"}]},"ts":"1732678430337"}
2024-11-27T03:33:50,339 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta
2024-11-27T03:33:50,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=30ae0c164627c581508f27dd9b02481f, ASSIGN}]
2024-11-27T03:33:50,342 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=30ae0c164627c581508f27dd9b02481f, ASSIGN
2024-11-27T03:33:50,343 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=30ae0c164627c581508f27dd9b02481f, ASSIGN; state=OFFLINE, location=66d3634da959,38459,1732678428873; forceNewPlan=false, retain=false
2024-11-27T03:33:50,408 INFO [RS:1;66d3634da959:40479 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C40479%2C1732678430168, suffix=, logDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168, archiveDir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs, maxLogs=32
2024-11-27T03:33:50,409 INFO [RS:1;66d3634da959:40479 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:33:50,416 INFO [RS:1;66d3634da959:40479 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:33:50,420 DEBUG [RS:1;66d3634da959:40479 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46611:46611),(127.0.0.1/127.0.0.1:34777:34777)]
2024-11-27T03:33:50,494 INFO [66d3634da959:45173 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
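Note: after the balancer entry above places the single region, its location is readable from any client through RegionLocator. A sketch of inspecting the resulting assignment; the table name comes from the log, the connection setup is assumed as in the earlier sketches:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
        public static void main(String[] args) throws Exception {
            TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(tn)) {
                // Expect one region, hosted wherever the OpenRegionProcedure deployed it
                // (66d3634da959,38459,... in this run).
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
                }
            }
        }
    }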
2024-11-27T03:33:50,494 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=30ae0c164627c581508f27dd9b02481f, regionState=OPENING, regionLocation=66d3634da959,38459,1732678428873
2024-11-27T03:33:50,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=30ae0c164627c581508f27dd9b02481f, ASSIGN because future has completed
2024-11-27T03:33:50,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 30ae0c164627c581508f27dd9b02481f, server=66d3634da959,38459,1732678428873}]
2024-11-27T03:33:50,658 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,659 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 30ae0c164627c581508f27dd9b02481f, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:33:50,660 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,660 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:33:50,660 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,660 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,663 INFO [StoreOpener-30ae0c164627c581508f27dd9b02481f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,665 INFO [StoreOpener-30ae0c164627c581508f27dd9b02481f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 30ae0c164627c581508f27dd9b02481f columnFamilyName info
2024-11-27T03:33:50,665 DEBUG [StoreOpener-30ae0c164627c581508f27dd9b02481f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:33:50,665 INFO [StoreOpener-30ae0c164627c581508f27dd9b02481f-1 {}] regionserver.HStore(327): Store=30ae0c164627c581508f27dd9b02481f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:33:50,665 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,666 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,666 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,667 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,667 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,668 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,670 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:33:50,670 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 30ae0c164627c581508f27dd9b02481f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849773, jitterRate=0.08054307103157043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-27T03:33:50,670 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:33:50,671 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 30ae0c164627c581508f27dd9b02481f: Running coprocessor pre-open hook at 1732678430660Writing region info on filesystem at 1732678430660Initializing all the Stores at 1732678430662 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678430662Cleaning up temporary data from old regions at 1732678430667 (+5 ms)Running coprocessor post-open hooks at 1732678430670 (+3 ms)Region opened successfully at 1732678430671 (+1 ms)
2024-11-27T03:33:50,672 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f., pid=6, masterSystemTime=1732678430651
2024-11-27T03:33:50,674 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,674 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:33:50,675 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=30ae0c164627c581508f27dd9b02481f, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,38459,1732678428873
2024-11-27T03:33:50,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 30ae0c164627c581508f27dd9b02481f, server=66d3634da959,38459,1732678428873 because future has completed
2024-11-27T03:33:50,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-27T03:33:50,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 30ae0c164627c581508f27dd9b02481f, server=66d3634da959,38459,1732678428873 in 183 msec
2024-11-27T03:33:50,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-27T03:33:50,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=30ae0c164627c581508f27dd9b02481f, ASSIGN in 343 msec
2024-11-27T03:33:50,688 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-27T03:33:50,688 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678430688"}]},"ts":"1732678430688"}
2024-11-27T03:33:50,691 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta
2024-11-27T03:33:50,692 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION
2024-11-27T03:33:50,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 384 msec
2024-11-27T03:33:55,466 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-27T03:33:55,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:33:55,488 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:33:55,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:33:55,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:33:55,498 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-11-27T03:33:55,498 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer
2024-11-27T03:33:55,500 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath'
2024-11-27T03:34:00,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45173 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:34:00,346 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed
2024-11-27T03:34:00,346 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100
2024-11-27T03:34:00,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath
2024-11-27T03:34:00,355 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:34:00,371 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:34:00,374 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:34:00,375 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:34:00,375 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:34:00,375 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:34:00,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fb911ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:34:00,376 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719bbb9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:34:00,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f0a2519{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-35837-hadoop-hdfs-3_4_1-tests_jar-_-any-9558892528341342407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:34:00,469 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68004957{HTTP/1.1, (http/1.1)}{localhost:35837}
2024-11-27T03:34:00,469 INFO [Time-limited test {}] server.Server(415): Started @121514ms
2024-11-27T03:34:00,471 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:34:00,502 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:34:00,505 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:34:00,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:34:00,506 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:34:00,506 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-27T03:34:00,506 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64b86931{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:34:00,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624ef820{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:34:00,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@786c904f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-36409-hadoop-hdfs-3_4_1-tests_jar-_-any-7190925213054037755/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:34:00,600 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18a0c61c{HTTP/1.1, (http/1.1)}{localhost:36409}
2024-11-27T03:34:00,600 INFO [Time-limited test {}] server.Server(415): Started @121644ms
2024-11-27T03:34:00,601 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:34:00,627 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:34:00,629 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:34:00,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:34:00,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:34:00,630 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:34:00,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@339da5f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:34:00,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fb4bc9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:34:00,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4a9b13ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-42545-hadoop-hdfs-3_4_1-tests_jar-_-any-636389015494368360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:00,724 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61f29988{HTTP/1.1, (http/1.1)}{localhost:42545} 2024-11-27T03:34:00,724 INFO [Time-limited test {}] server.Server(415): Started @121768ms 2024-11-27T03:34:00,725 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:34:01,699 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:01,699 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:01,723 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:34:01,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x131fe5bbea79dbe7 with lease ID 0xdc375885b0dc55b4: Processing first storage report for DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0 from datanode DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:34:01,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x131fe5bbea79dbe7 with lease ID 0xdc375885b0dc55b4: from storage DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0 node DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:01,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x131fe5bbea79dbe7 with lease ID 0xdc375885b0dc55b4: Processing first storage report for DS-2abd904a-f288-470e-a16c-f098a8f8afed from datanode DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:34:01,725 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x131fe5bbea79dbe7 with lease ID 0xdc375885b0dc55b4: from storage DS-2abd904a-f288-470e-a16c-f098a8f8afed node DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:01,949 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data7/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:01,950 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data8/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:01,965 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:34:01,967 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16c27c08d1c5ce13 with lease ID 0xdc375885b0dc55b5: Processing first storage report for DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1 from datanode DatanodeRegistration(127.0.0.1:45573, datanodeUuid=caaa0116-738a-4175-82df-50d1e7068a9d, infoPort=41091, infoSecurePort=0, ipcPort=43201, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:34:01,968 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16c27c08d1c5ce13 with lease ID 0xdc375885b0dc55b5: from storage DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1 node DatanodeRegistration(127.0.0.1:45573, datanodeUuid=caaa0116-738a-4175-82df-50d1e7068a9d, infoPort=41091, infoSecurePort=0, ipcPort=43201, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:01,968 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x16c27c08d1c5ce13 with lease ID 0xdc375885b0dc55b5: Processing first storage report for DS-fd40866b-05a6-4bdb-b4b2-92a4b43030c6 from datanode DatanodeRegistration(127.0.0.1:45573, datanodeUuid=caaa0116-738a-4175-82df-50d1e7068a9d, infoPort=41091, infoSecurePort=0, ipcPort=43201, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:34:01,968 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x16c27c08d1c5ce13 with lease ID 0xdc375885b0dc55b5: from storage DS-fd40866b-05a6-4bdb-b4b2-92a4b43030c6 node DatanodeRegistration(127.0.0.1:45573, datanodeUuid=caaa0116-738a-4175-82df-50d1e7068a9d, infoPort=41091, infoSecurePort=0, ipcPort=43201, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:02,043 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data9/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:02,043 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data10/current/BP-1346822850-172.17.0.3-1732678426327/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:02,062 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:34:02,065 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25dc53c29c157373 with lease ID 0xdc375885b0dc55b6: Processing first storage report for DS-beceb969-bf1b-4d1d-8062-b41575a8a890 from datanode DatanodeRegistration(127.0.0.1:41625, datanodeUuid=34b1b642-8f0d-4f95-85fc-931918b655bf, infoPort=35999, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:34:02,065 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25dc53c29c157373 with lease ID 0xdc375885b0dc55b6: from storage DS-beceb969-bf1b-4d1d-8062-b41575a8a890 node DatanodeRegistration(127.0.0.1:41625, datanodeUuid=34b1b642-8f0d-4f95-85fc-931918b655bf, infoPort=35999, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:02,065 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25dc53c29c157373 with lease ID 0xdc375885b0dc55b6: Processing first storage report for DS-ad910628-45c1-46e0-8e5e-cfb8b7a2eb5f from datanode DatanodeRegistration(127.0.0.1:41625, datanodeUuid=34b1b642-8f0d-4f95-85fc-931918b655bf, infoPort=35999, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327) 2024-11-27T03:34:02,065 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25dc53c29c157373 with lease ID 0xdc375885b0dc55b6: from storage DS-ad910628-45c1-46e0-8e5e-cfb8b7a2eb5f node DatanodeRegistration(127.0.0.1:41625, datanodeUuid=34b1b642-8f0d-4f95-85fc-931918b655bf, infoPort=35999, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:02,161 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,161 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:02,161 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,162 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,163 WARN [DataStreamer for file /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 block BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:02,163 WARN [DataStreamer for file /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 block BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:02,163 WARN [DataStreamer for file /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 block BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 
2024-11-27T03:34:02,163 WARN [DataStreamer for file /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta block BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:02,163 WARN [PacketResponder: BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41621] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,163 WARN [PacketResponder: BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41621] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:58302 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58302 dst: /127.0.0.1:46751 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:47828 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47828 dst: /127.0.0.1:41621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037953256_22 at /127.0.0.1:47810 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47810 dst: /127.0.0.1:41621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:58296 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58296 dst: /127.0.0.1:46751 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,166 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1722042704_22 at /127.0.0.1:58334 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58334 dst: /127.0.0.1:46751 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:47840 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47840 dst: /127.0.0.1:41621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,165 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1722042704_22 at /127.0.0.1:47864 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47864 dst: /127.0.0.1:41621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,167 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037953256_22 at /127.0.0.1:58280 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58280 dst: /127.0.0.1:46751 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T03:34:02,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24c93faa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:02,196 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e8113bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:02,196 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:02,196 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@144f8866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:02,197 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42443481{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:02,198 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:34:02,198 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-27T03:34:02,198 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1346822850-172.17.0.3-1732678426327 (Datanode Uuid 4404529f-685b-4abe-8546-99a2e400169c) service to localhost/127.0.0.1:44897 2024-11-27T03:34:02,198 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:34:02,198 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data3/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:02,199 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data4/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:02,199 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:34:02,199 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,199 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,199 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,199 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1722042704_22 at /127.0.0.1:60336 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60336 dst: /127.0.0.1:46751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:60334 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60334 dst: /127.0.0.1:46751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:60338 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60338 dst: /127.0.0.1:46751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037953256_22 at /127.0.0.1:60332 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46751:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60332 dst: /127.0.0.1:46751 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:02,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@158b5ab3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:02,202 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1495e1af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:02,202 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:02,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@372d60ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:02,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51561b8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:02,204 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:34:02,204 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:34:02,204 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1346822850-172.17.0.3-1732678426327 (Datanode Uuid 107e5a40-1a85-480e-a4dd-496ab3153acb) service to localhost/127.0.0.1:44897 2024-11-27T03:34:02,204 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:34:02,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data1/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:02,204 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data2/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:02,205 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:34:02,209 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f., hostname=66d3634da959,38459,1732678428873, seqNum=2] 2024-11-27T03:34:02,210 ERROR [FSHLog-0-hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4-prefix:66d3634da959,38459,1732678428873 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,211 WARN [FSHLog-0-hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4-prefix:66d3634da959,38459,1732678428873 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,211 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C38459%2C1732678428873:(num 1732678429510) roll requested 2024-11-27T03:34:02,211 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.1732678442211 2024-11-27T03:34:02,214 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,214 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:02,214 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741838_1018 2024-11-27T03:34:02,216 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] 2024-11-27T03:34:02,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:02,223 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:02,224 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:02,224 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:02,224 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:02,224 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678442211 2024-11-27T03:34:02,224 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,225 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,226 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41091:41091),(127.0.0.1/127.0.0.1:35999:35999)] 2024-11-27T03:34:02,226 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-27T03:34:02,226 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time 2024-11-27T03:34:02,226 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-27T03:34:02,226 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 2024-11-27T03:34:02,229 WARN [IPC Server handler 4 on default port 44897 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1016 2024-11-27T03:34:02,233 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 after 5ms 2024-11-27T03:34:02,277 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:02,997 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:04,226 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:04,228 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678442211 2024-11-27T03:34:04,229 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
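The recurring java.io.IOException: All datanodes [...] are bad. Aborting... is DataStreamer giving up on pipeline recovery: the pipeline for the old WAL had already shrunk to the single node 127.0.0.1:46751, so once that node died there was nothing left to recover onto. Whether the client tries to swap in a replacement datanode is controlled by client-side settings; a sketch of the relevant knobs (example values, not the configuration this test ran with):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PipelineRecoveryConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Allow replacing a failed datanode in an open write pipeline at all.
    conf.setBoolean(
        "dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only requests a replacement for larger pipelines; NEVER keeps
    // writing to the surviving nodes (common on small test clusters);
    // ALWAYS insists on a replacement.
    conf.set(
        "dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // With best-effort=true the write continues even if replacement fails,
    // instead of aborting with "All datanodes are bad".
    conf.setBoolean(
        "dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
    FileSystem fs = FileSystem.get(conf);
    System.out.println("default FS: " + fs.getUri());
  }
}
```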
2024-11-27T03:34:04,230 WARN [DataStreamer for file /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678442211 block BP-1346822850-172.17.0.3-1732678426327:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad. 2024-11-27T03:34:04,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:59122 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:45573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59122 dst: /127.0.0.1:45573 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T03:34:04,231 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:50284 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41625:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50284 dst: /127.0.0.1:41625 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:04,277 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:04,312 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@786c904f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:04,313 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18a0c61c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:04,313 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:04,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624ef820{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:04,313 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64b86931{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:04,315 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:34:04,315 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-27T03:34:04,315 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1346822850-172.17.0.3-1732678426327 (Datanode Uuid caaa0116-738a-4175-82df-50d1e7068a9d) service to localhost/127.0.0.1:44897 2024-11-27T03:34:04,315 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:34:04,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data7/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:04,316 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data8/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:04,316 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:34:04,998 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:06,227 WARN [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]] 2024-11-27T03:34:06,228 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:06,228 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C38459%2C1732678428873:(num 1732678442211) roll requested 2024-11-27T03:34:06,229 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.1732678446228 2024-11-27T03:34:06,235 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 after 4008ms 2024-11-27T03:34:06,235 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
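The roll requested above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") comes from FSHLog's low-replication check: rather than keep appending to an under-replicated file, it asks the roller to open a new WAL on a healthy pipeline. A hypothetical condensation of that check, with illustrative names (the real FSHLog reads the live replica count from its output stream and caps how many consecutive low-replication rolls it will request):

```java
public class LowReplicationRollSketch {
  // cf. hbase.regionserver.hlog.tolerable.lowreplication
  static final int TOLERABLE_REPLICAS = 2;

  static boolean shouldRequestRoll(int currentPipelineReplicas) {
    return currentPipelineReplicas < TOLERABLE_REPLICAS;
  }

  public static void main(String[] args) {
    // Two of the pipeline's datanodes are dead, one replica remains.
    System.out.println(shouldRequestRoll(1)); // true -> roll the WAL
  }
}
```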
2024-11-27T03:34:06,235 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:06,235 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741840_1022 2024-11-27T03:34:06,236 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] 2024-11-27T03:34:06,239 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46751 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:06,239 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:50306 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data10]'}, localName='127.0.0.1:41625', datanodeUuid='34b1b642-8f0d-4f95-85fc-931918b655bf', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741841_1023 to mirror 127.0.0.1:46751 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T03:34:06,239 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad. 2024-11-27T03:34:06,239 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741841_1023 2024-11-27T03:34:06,240 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:50306 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-27T03:34:06,240 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:50306 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:41625:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50306 dst: /127.0.0.1:41625 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:06,240 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK] 2024-11-27T03:34:06,242 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:06,242 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad. 2024-11-27T03:34:06,242 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741842_1024 2024-11-27T03:34:06,243 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK] 2024-11-27T03:34:06,248 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:06,248 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:06,248 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:06,248 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:06,249 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:06,249 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678442211 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678446228 2024-11-27T03:34:06,250 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32925:32925),(127.0.0.1/127.0.0.1:35999:35999)] 2024-11-27T03:34:06,250 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time 2024-11-27T03:34:06,250 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678442211 is not closed yet, will try archiving it next time 2024-11-27T03:34:06,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41625 is added to blk_1073741839_1021 (size=2431) 2024-11-27T03:34:06,278 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:06,321 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-27T03:34:06,654 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time 2024-11-27T03:34:06,998 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:08,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741839_1021 (size=2431) 2024-11-27T03:34:08,250 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:08,278 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
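In parallel, Close-WAL-Writer is still reclaiming the old WAL (the 1732678429510 file that "is not closed yet"): the NameNode answered "Lease recovery is in progress. RecoveryId = 1020", so recoverLease() keeps returning false and RecoverLeaseFSUtils retries (attempt=0 after 5ms, attempt=1 after 4008ms above). A minimal sketch of that polling loop, assuming HDFS and an illustrative fixed pause (the real utility escalates its timeouts between attempts):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
  static boolean recoverLease(Configuration conf, Path wal,
                              int maxAttempts, long pauseMs) throws Exception {
    FileSystem fs = FileSystem.get(wal.toUri(), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to do: only HDFS holds write leases
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // true once the NameNode has closed the file; false while a
      // RecoveryId is still outstanding for the last block.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]); // e.g. the unclosed WAL path above
    System.out.println(recoverLease(new Configuration(), wal, 10, 4000L));
  }
}
```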
2024-11-27T03:34:08,325 WARN [ResponseProcessor for block BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:08,325 WARN [DataStreamer for file /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678446228 block BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad. 2024-11-27T03:34:08,325 WARN [PacketResponder: BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41625] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:08,326 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57176 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57176 dst: /127.0.0.1:37315 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:08,326 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:50308 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:41625:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50308 dst: /127.0.0.1:41625 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:08,362 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4a9b13ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:08,362 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61f29988{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:08,363 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:08,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fb4bc9e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:08,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@339da5f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:08,366 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:34:08,366 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1346822850-172.17.0.3-1732678426327 (Datanode Uuid 34b1b642-8f0d-4f95-85fc-931918b655bf) service to localhost/127.0.0.1:44897 2024-11-27T03:34:08,366 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:34:08,366 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:34:08,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data9/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:08,367 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data10/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:08,368 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:34:08,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38459 {}] regionserver.HRegion(8855): Flush requested on 30ae0c164627c581508f27dd9b02481f 2024-11-27T03:34:08,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-27T03:34:08,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/3b71555f0a044c67924c09450eb5175f is 1080, key is row0002/info:/1732678444318/Put/seqid=0 2024-11-27T03:34:08,397 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:08,398 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 
2024-11-27T03:34:08,398 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741844_1027
2024-11-27T03:34:08,398 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]
2024-11-27T03:34:08,399 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:08,399 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
2024-11-27T03:34:08,399 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741845_1028
2024-11-27T03:34:08,400 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:08,401 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:08,401 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
2024-11-27T03:34:08,401 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741846_1029
2024-11-27T03:34:08,402 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]
2024-11-27T03:34:08,403 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:08,403 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:08,403 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741847_1030
2024-11-27T03:34:08,403 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:08,404 WARN [IPC Server handler 4 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-27T03:34:08,404 WARN [IPC Server handler 4 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-27T03:34:08,404 WARN [IPC Server handler 4 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-27T03:34:08,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741848_1031 (size=10347)
2024-11-27T03:34:08,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/3b71555f0a044c67924c09450eb5175f
2024-11-27T03:34:08,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/3b71555f0a044c67924c09450eb5175f as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/3b71555f0a044c67924c09450eb5175f
2024-11-27T03:34:08,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/3b71555f0a044c67924c09450eb5175f, entries=5, sequenceid=11, filesize=10.1 K
2024-11-27T03:34:08,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 30ae0c164627c581508f27dd9b02481f in 459ms, sequenceid=11, compaction requested=false
2024-11-27T03:34:08,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30ae0c164627c581508f27dd9b02481f:
2024-11-27T03:34:08,999 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:09,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38459 {}] regionserver.HRegion(8855): Flush requested on 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:34:09,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-11-27T03:34:09,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/204c8f728b354b41a59559d83cafe283 is 1080, key is row0007/info:/1732678448378/Put/seqid=0
2024-11-27T03:34:09,022 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:09,022 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
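The BlockPlacementPolicyDefault warning above states exactly how to get more detail: enable DEBUG on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology. A minimal sketch of raising those levels at runtime with the Log4j2 API already on this classpath (Log4j Core 2.17.2 per the startup output); the class name and the idea of doing this from a test hook are hypothetical, while the logger names are taken verbatim from the warning:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class EnablePlacementDebug {
        public static void main(String[] args) {
            // Logger names copied from the NameNode's "Failed to place enough
            // replicas" warning; DEBUG makes the policy log why each datanode
            // was rejected, not just the summary line.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy",
                Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }

The same effect could be had statically through the test's Log4j2 properties file; the programmatic form is shown only because it needs no restart.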
2024-11-27T03:34:09,022 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741849_1032
2024-11-27T03:34:09,023 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]
2024-11-27T03:34:09,024 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:09,024 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad.
2024-11-27T03:34:09,024 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741850_1033
2024-11-27T03:34:09,025 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]
2024-11-27T03:34:09,026 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:09,026 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:09,026 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741851_1034
2024-11-27T03:34:09,027 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:09,029 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45573
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:09,029 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57210 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741852_1035 to mirror 127.0.0.1:45573
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:09,029 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
2024-11-27T03:34:09,029 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741852_1035
2024-11-27T03:34:09,029 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57210 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-27T03:34:09,029 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57210 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57210 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:09,030 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:09,031 WARN [IPC Server handler 1 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-27T03:34:09,031 WARN [IPC Server handler 1 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-27T03:34:09,031 WARN [IPC Server handler 1 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-27T03:34:09,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741853_1036 (size=12506)
2024-11-27T03:34:09,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/204c8f728b354b41a59559d83cafe283
2024-11-27T03:34:09,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/204c8f728b354b41a59559d83cafe283 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283
2024-11-27T03:34:09,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283, entries=7, sequenceid=24, filesize=12.2 K
2024-11-27T03:34:09,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 30ae0c164627c581508f27dd9b02481f in 441ms, sequenceid=24, compaction requested=false
2024-11-27T03:34:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30ae0c164627c581508f27dd9b02481f:
2024-11-27T03:34:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-11-27T03:34:09,452 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:34:09,453 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283 because midkey is the same as first or last row
2024-11-27T03:34:10,251 WARN [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]
2024-11-27T03:34:10,251 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,251 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C38459%2C1732678428873:(num 1732678446228) roll requested
2024-11-27T03:34:10,253 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.1732678450252
2024-11-27T03:34:10,259 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,260 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
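The repeating pattern in these records is the DFSClient's write-pipeline recovery loop: a connect to a dead datanode is refused, the node is declared bad, the block is abandoned, and the node is excluded from the next allocation until only one live datanode remains. Which failures trigger datanode replacement in an already-established pipeline is governed by the standard HDFS client settings sketched below; the keys are from hdfs-default.xml, but the values shown are illustrative, not taken from this test run:

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineRecoverySketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Policy for replacing a failed datanode in an existing write
            // pipeline: NEVER, DEFAULT, or ALWAYS. DEFAULT applies heuristics
            // based on replication factor and remaining pipeline size.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy",
                     "DEFAULT");
            // If no replacement datanode can be found, keep writing with the
            // surviving nodes instead of failing the stream outright.
            conf.setBoolean(
                "dfs.client.block.write.replace-datanode-on-failure.best-effort",
                true);
            System.out.println(
                conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
        }
    }

Note that the "Abandoning"/"Excluding datanode" pairs above come from the pipeline *creation* path (setupPipelineForCreate), where the client simply asks the NameNode for a fresh block that excludes the failed node; the replace-datanode policy matters for the "datanode ... is bad" recovery of pipelines that were already streaming.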
2024-11-27T03:34:10,260 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741854_1037
2024-11-27T03:34:10,260 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:10,264 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41621
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57216 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741855_1038 to mirror 127.0.0.1:41621
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,264 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad.
2024-11-27T03:34:10,264 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741855_1038
2024-11-27T03:34:10,264 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57216 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-27T03:34:10,264 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57216 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57216 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,265 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]
2024-11-27T03:34:10,268 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46751
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,268 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57230 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741856_1039 to mirror 127.0.0.1:46751
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,269 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
2024-11-27T03:34:10,269 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741856_1039
2024-11-27T03:34:10,269 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57230 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-27T03:34:10,269 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57230 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57230 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,269 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]
2024-11-27T03:34:10,272 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41625
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,272 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57236 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741857_1040 to mirror 127.0.0.1:41625
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,273 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:10,273 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741857_1040
2024-11-27T03:34:10,273 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57236 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-27T03:34:10,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57236 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57236 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,273 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:10,274 WARN [IPC Server handler 2 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-27T03:34:10,274 WARN [IPC Server handler 2 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-27T03:34:10,274 WARN [IPC Server handler 2 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-27T03:34:10,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:10,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:10,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:10,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:10,277 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:10,277 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678446228 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678450252
2024-11-27T03:34:10,278 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32925:32925)]
2024-11-27T03:34:10,278 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time
2024-11-27T03:34:10,278 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678446228 is not closed yet, will try archiving it next time
2024-11-27T03:34:10,279 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741843_1026 (size=25992)
2024-11-27T03:34:10,280 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678442211 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs/66d3634da959%2C38459%2C1732678428873.1732678442211
2024-11-27T03:34:10,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38459 {}] regionserver.HRegion(8855): Flush requested on 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:34:10,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-27T03:34:10,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/f5187121ef86476d91e82d0cada8b4e2 is 1079, key is tmprow/info:/1732678450437/Put/seqid=0
2024-11-27T03:34:10,450 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,450 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
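The roll sequence above is the behavior this test exercises: FSHLog detects that the WAL pipeline has shrunk to one replica ("Found 1 replicas but expecting no less than 2 replicas"), requests a roll, and the new writer comes up on a fresh single-node pipeline. A hedged sketch of the tuning involved; both configuration keys below are assumptions based on FSHLog's low-replication check, not confirmed by this log, and the values are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalRollTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key: replica count below which FSHLog treats the WAL
            // pipeline as degraded and requests a roll.
            conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
            // Assumed key: how many consecutive low-replication rolls are
            // tolerated before FSHLog stops requesting further rolls.
            conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
            System.out.println(
                conf.getInt("hbase.regionserver.hlog.lowreplication.rolllimit", -1));
        }
    }

Note how the roll succeeds even while new-block allocation keeps failing: the only reachable datanode left is 127.0.0.1:37315, so every block ends up single-replica, which is exactly the degraded state the roller keeps flagging.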
2024-11-27T03:34:10,450 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741859_1042 2024-11-27T03:34:10,451 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK] 2024-11-27T03:34:10,452 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:10,452 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad. 2024-11-27T03:34:10,452 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741860_1043 2024-11-27T03:34:10,453 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK] 2024-11-27T03:34:10,455 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41621 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:10,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57246 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741861_1044 to mirror 127.0.0.1:41621 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:10,455 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:10,455 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741861_1044 2024-11-27T03:34:10,455 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57246 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-27T03:34:10,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57246 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57246 dst: /127.0.0.1:37315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:10,456 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] 2024-11-27T03:34:10,457 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:10,457 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad. 
2024-11-27T03:34:10,457 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741862_1045
2024-11-27T03:34:10,458 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:10,458 WARN [IPC Server handler 0 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-27T03:34:10,458 WARN [IPC Server handler 0 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-27T03:34:10,458 WARN [IPC Server handler 0 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-27T03:34:10,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741863_1046 (size=6027)
2024-11-27T03:34:10,681 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time
2024-11-27T03:34:10,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/f5187121ef86476d91e82d0cada8b4e2
2024-11-27T03:34:10,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/f5187121ef86476d91e82d0cada8b4e2 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/f5187121ef86476d91e82d0cada8b4e2
2024-11-27T03:34:10,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/f5187121ef86476d91e82d0cada8b4e2, entries=1, sequenceid=34, filesize=5.9 K
2024-11-27T03:34:10,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 30ae0c164627c581508f27dd9b02481f in 445ms, sequenceid=34, compaction requested=true
2024-11-27T03:34:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30ae0c164627c581508f27dd9b02481f:
2024-11-27T03:34:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K
2024-11-27T03:34:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:34:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283 because midkey is the same as first or last row
2024-11-27T03:34:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30ae0c164627c581508f27dd9b02481f:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:34:10,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:34:10,886 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:34:10,888 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:34:10,888 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HStore(1541): 30ae0c164627c581508f27dd9b02481f/info is initiating minor compaction (all files)
2024-11-27T03:34:10,888 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30ae0c164627c581508f27dd9b02481f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:34:10,888 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/3b71555f0a044c67924c09450eb5175f, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/f5187121ef86476d91e82d0cada8b4e2] into tmpdir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp, totalSize=28.2 K
2024-11-27T03:34:10,889 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b71555f0a044c67924c09450eb5175f, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732678444318
2024-11-27T03:34:10,889 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 204c8f728b354b41a59559d83cafe283, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732678448378
2024-11-27T03:34:10,890 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5187121ef86476d91e82d0cada8b4e2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732678450437
2024-11-27T03:34:10,903 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30ae0c164627c581508f27dd9b02481f#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:34:10,904 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/89ea6e31a08f4c26ad5060af22e3ff33 is 1080, key is row0002/info:/1732678444318/Put/seqid=0
2024-11-27T03:34:10,906 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,906 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:10,906 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741864_1047
2024-11-27T03:34:10,906 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:10,908 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,908 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
2024-11-27T03:34:10,908 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741865_1048
2024-11-27T03:34:10,908 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:10,910 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46751
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,910 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57284 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741866_1049 to mirror 127.0.0.1:46751
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,911 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
2024-11-27T03:34:10,911 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57284 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-27T03:34:10,911 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741866_1049
2024-11-27T03:34:10,911 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57284 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57284 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,911 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]
2024-11-27T03:34:10,913 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41621
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:10,913 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57290 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741867_1050 to mirror 127.0.0.1:41621
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,913 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad.
2024-11-27T03:34:10,913 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57290 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-27T03:34:10,914 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741867_1050
2024-11-27T03:34:10,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:57290 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57290 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:10,914 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]
2024-11-27T03:34:10,915 WARN [IPC Server handler 2 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-27T03:34:10,915 WARN [IPC Server handler 2 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-27T03:34:10,915 WARN [IPC Server handler 2 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-27T03:34:10,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741868_1051 (size=17994)
2024-11-27T03:34:10,999 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:11,334 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/89ea6e31a08f4c26ad5060af22e3ff33 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33
2024-11-27T03:34:11,341 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30ae0c164627c581508f27dd9b02481f/info of 30ae0c164627c581508f27dd9b02481f into 89ea6e31a08f4c26ad5060af22e3ff33(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30ae0c164627c581508f27dd9b02481f:
2024-11-27T03:34:11,342 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f., storeName=30ae0c164627c581508f27dd9b02481f/info, priority=13, startTime=1732678450886; duration=0sec
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 because midkey is the same as first or last row
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 because midkey is the same as first or last row
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 because midkey is the same as first or last row
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:34:11,342 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30ae0c164627c581508f27dd9b02481f:info
2024-11-27T03:34:11,730 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@52f32063[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741848_1031 to 127.0.0.1:41621 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,730 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c34c44d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741853_1036 to 127.0.0.1:41625 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38459 {}] regionserver.HRegion(8855): Flush requested on 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:34:11,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-27T03:34:11,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/5f17bffce3ac402db46b04eb5201693b is 1079, key is tmprow/info:/1732678451875/Put/seqid=0
2024-11-27T03:34:11,889 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:11,890 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
2024-11-27T03:34:11,890 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741869_1052
2024-11-27T03:34:11,891 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]
2024-11-27T03:34:11,894 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41621
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:11,894 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54140 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741870_1053 to mirror 127.0.0.1:41621
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,894 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad.
2024-11-27T03:34:11,894 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741870_1053
2024-11-27T03:34:11,895 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54140 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-27T03:34:11,895 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54140 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54140 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,895 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]
2024-11-27T03:34:11,899 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41625
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:11,899 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54142 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741871_1054 to mirror 127.0.0.1:41625
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,899 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:11,899 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741871_1054
2024-11-27T03:34:11,899 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54142 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-27T03:34:11,900 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54142 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54142 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,900 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:11,902 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45573
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:11,902 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54146 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741872_1055 to mirror 127.0.0.1:45573
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,902 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
2024-11-27T03:34:11,902 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741872_1055
2024-11-27T03:34:11,902 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54146 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-27T03:34:11,902 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54146 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54146 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:11,903 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:11,904 WARN [IPC Server handler 0 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-27T03:34:11,904 WARN [IPC Server handler 0 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-27T03:34:11,904 WARN [IPC Server handler 0 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-27T03:34:11,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741873_1056 (size=6027)
2024-11-27T03:34:12,279 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:12,280 WARN [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]
2024-11-27T03:34:12,281 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:12,281 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C38459%2C1732678428873:(num 1732678450252) roll requested
2024-11-27T03:34:12,282 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.1732678452281
2024-11-27T03:34:12,288 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:12,289 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
2024-11-27T03:34:12,289 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741874_1057
2024-11-27T03:34:12,290 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:12,292 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:12,293 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:12,293 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741875_1058
2024-11-27T03:34:12,294 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:12,296 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:12,296 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad.
2024-11-27T03:34:12,296 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741876_1059
2024-11-27T03:34:12,297 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]
2024-11-27T03:34:12,300 WARN [Thread-976 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46751
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:12,300 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54172 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741877_1060 to mirror 127.0.0.1:46751
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:12,300 WARN [Thread-976 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad.
2024-11-27T03:34:12,300 WARN [Thread-976 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741877_1060
2024-11-27T03:34:12,300 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54172 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-27T03:34:12,301 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54172 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54172 dst: /127.0.0.1:37315
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:12,301 WARN [Thread-976 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK] 2024-11-27T03:34:12,302 WARN [IPC Server handler 2 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-27T03:34:12,302 WARN [IPC Server handler 2 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-27T03:34:12,302 WARN [IPC Server handler 2 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-27T03:34:12,304 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:12,304 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:12,304 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:12,304 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:12,304 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:12,305 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678450252 with entries=14, filesize=12.92 KB; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678452281 2024-11-27T03:34:12,306 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32925:32925)] 2024-11-27T03:34:12,306 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time 
2024-11-27T03:34:12,306 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678450252 is not closed yet, will try archiving it next time 2024-11-27T03:34:12,306 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678446228 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs/66d3634da959%2C38459%2C1732678428873.1732678446228 2024-11-27T03:34:12,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741858_1041 (size=13234) 2024-11-27T03:34:12,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/5f17bffce3ac402db46b04eb5201693b 2024-11-27T03:34:12,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/5f17bffce3ac402db46b04eb5201693b as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/5f17bffce3ac402db46b04eb5201693b 2024-11-27T03:34:12,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/5f17bffce3ac402db46b04eb5201693b, entries=1, sequenceid=45, filesize=5.9 K 2024-11-27T03:34:12,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 30ae0c164627c581508f27dd9b02481f in 441ms, sequenceid=45, compaction requested=false 2024-11-27T03:34:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30ae0c164627c581508f27dd9b02481f: 2024-11-27T03:34:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-27T03:34:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:34:12,320 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 because midkey is the same as first or last row 2024-11-27T03:34:12,708 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time
2024-11-27T03:34:12,728 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c34c44d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741843_1026 to 127.0.0.1:46751 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:12,728 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@52f32063[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741863_1046 to 127.0.0.1:46751 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:13,000 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38459 {}] regionserver.HRegion(8855): Flush requested on 30ae0c164627c581508f27dd9b02481f 2024-11-27T03:34:13,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-27T03:34:13,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/ad81e109a0204c7d83a29cffc161a8f7 is 1079, key is tmprow/info:/1732678453311/Put/seqid=0 2024-11-27T03:34:13,323 WARN [Thread-981 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:13,324 WARN [Thread-981 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad. 2024-11-27T03:34:13,324 WARN [Thread-981 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741879_1062 2024-11-27T03:34:13,324 WARN [Thread-981 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK] 2024-11-27T03:34:13,327 WARN [Thread-981 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41625 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:13,327 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54204 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741880_1063 to mirror 127.0.0.1:41625 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:13,327 WARN [Thread-981 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad. 2024-11-27T03:34:13,327 WARN [Thread-981 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741880_1063 2024-11-27T03:34:13,328 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54204 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-27T03:34:13,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54204 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54204 dst: /127.0.0.1:37315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:13,328 WARN [Thread-981 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK] 2024-11-27T03:34:13,330 WARN [Thread-981 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:13,331 WARN [Thread-981 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:13,331 WARN [Thread-981 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741881_1064 2024-11-27T03:34:13,331 WARN [Thread-981 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] 2024-11-27T03:34:13,333 WARN [Thread-981 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:13,333 WARN [Thread-981 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad. 2024-11-27T03:34:13,333 WARN [Thread-981 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741882_1065 2024-11-27T03:34:13,334 WARN [Thread-981 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK] 2024-11-27T03:34:13,334 WARN [IPC Server handler 0 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-27T03:34:13,334 WARN [IPC Server handler 0 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-27T03:34:13,334 WARN [IPC Server handler 0 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-27T03:34:13,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741883_1066 (size=6027) 2024-11-27T03:34:13,738 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/ad81e109a0204c7d83a29cffc161a8f7 2024-11-27T03:34:13,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/ad81e109a0204c7d83a29cffc161a8f7 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ad81e109a0204c7d83a29cffc161a8f7
2024-11-27T03:34:13,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ad81e109a0204c7d83a29cffc161a8f7, entries=1, sequenceid=55, filesize=5.9 K 2024-11-27T03:34:13,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 30ae0c164627c581508f27dd9b02481f in 439ms, sequenceid=55, compaction requested=true 2024-11-27T03:34:13,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30ae0c164627c581508f27dd9b02481f: 2024-11-27T03:34:13,754 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-27T03:34:13,754 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:34:13,754 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 because midkey is the same as first or last row 2024-11-27T03:34:13,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 30ae0c164627c581508f27dd9b02481f:info, priority=-2147483648, current under compaction store size is 1 2024-11-27T03:34:13,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:34:13,754 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T03:34:13,756 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T03:34:13,756 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HStore(1541): 30ae0c164627c581508f27dd9b02481f/info is initiating minor compaction (all files) 2024-11-27T03:34:13,756 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 30ae0c164627c581508f27dd9b02481f/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:34:13,756 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/5f17bffce3ac402db46b04eb5201693b, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ad81e109a0204c7d83a29cffc161a8f7] into tmpdir=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp, totalSize=29.3 K 2024-11-27T03:34:13,756 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 89ea6e31a08f4c26ad5060af22e3ff33, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732678444318 2024-11-27T03:34:13,757 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f17bffce3ac402db46b04eb5201693b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732678451875 2024-11-27T03:34:13,757 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad81e109a0204c7d83a29cffc161a8f7, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732678453311 2024-11-27T03:34:13,772 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 30ae0c164627c581508f27dd9b02481f#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T03:34:13,772 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/ced4acb6a02f4ddea65234032ea46fbf is 1080, key is row0002/info:/1732678444318/Put/seqid=0 2024-11-27T03:34:13,775 WARN [Thread-986 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41621 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:13,775 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54228 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741884_1067 to mirror 127.0.0.1:41621 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:13,775 WARN [Thread-986 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]) is bad. 2024-11-27T03:34:13,775 WARN [Thread-986 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741884_1067 2024-11-27T03:34:13,775 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54228 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-27T03:34:13,775 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54228 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54228 dst: /127.0.0.1:37315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:13,776 WARN [Thread-986 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41621,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK] 2024-11-27T03:34:13,777 WARN [Thread-986 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:13,777 WARN [Thread-986 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad. 2024-11-27T03:34:13,777 WARN [Thread-986 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741885_1068 2024-11-27T03:34:13,778 WARN [Thread-986 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK] 2024-11-27T03:34:13,779 WARN [Thread-986 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:13,779 WARN [Thread-986 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad. 2024-11-27T03:34:13,779 WARN [Thread-986 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741886_1069 2024-11-27T03:34:13,780 WARN [Thread-986 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK] 2024-11-27T03:34:13,782 WARN [Thread-986 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46751 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:13,781 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54230 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6]'}, localName='127.0.0.1:37315', datanodeUuid='127cfd7c-c7cb-42c1-91ff-5618fffaf028', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741887_1070 to mirror 127.0.0.1:46751 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:13,782 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54230 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-27T03:34:13,782 WARN [Thread-986 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK], DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]) is bad. 2024-11-27T03:34:13,782 WARN [Thread-986 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741887_1070 2024-11-27T03:34:13,782 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1936866797_22 at /127.0.0.1:54230 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:37315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54230 dst: /127.0.0.1:37315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T03:34:13,782 WARN [Thread-986 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK] 2024-11-27T03:34:13,783 WARN [IPC Server handler 4 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-27T03:34:13,783 WARN [IPC Server handler 4 on default port 44897 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-27T03:34:13,783 WARN [IPC Server handler 4 on default port 44897 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-27T03:34:13,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741888_1071 (size=18097) 2024-11-27T03:34:14,203 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/ced4acb6a02f4ddea65234032ea46fbf as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ced4acb6a02f4ddea65234032ea46fbf 2024-11-27T03:34:14,212 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 30ae0c164627c581508f27dd9b02481f/info of 30ae0c164627c581508f27dd9b02481f into ced4acb6a02f4ddea65234032ea46fbf(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-27T03:34:14,212 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 30ae0c164627c581508f27dd9b02481f: 2024-11-27T03:34:14,212 INFO [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f., storeName=30ae0c164627c581508f27dd9b02481f/info, priority=13, startTime=1732678453754; duration=0sec 2024-11-27T03:34:14,212 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-27T03:34:14,212 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ced4acb6a02f4ddea65234032ea46fbf because midkey is the same as first or last row 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ced4acb6a02f4ddea65234032ea46fbf because midkey is the same as first or last row 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ced4acb6a02f4ddea65234032ea46fbf because midkey is the same as first or last row 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:34:14,213 DEBUG [RS:0;66d3634da959:38459-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 30ae0c164627c581508f27dd9b02481f:info 2024-11-27T03:34:14,280 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:14,306 WARN [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-27T03:34:14,306 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:14,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:34:14,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:34:14,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:34:14,343 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:34:14,343 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:34:14,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e00c8cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:34:14,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22c6c03b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:34:14,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3eb1ba79{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/java.io.tmpdir/jetty-localhost-40173-hadoop-hdfs-3_4_1-tests_jar-_-any-2908261808059358831/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:14,435 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@117da513{HTTP/1.1, (http/1.1)}{localhost:40173} 2024-11-27T03:34:14,435 INFO [Time-limited test {}] server.Server(415): Started @135479ms 2024-11-27T03:34:14,436 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:34:14,729 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@52f32063[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741868_1051 to 127.0.0.1:45573 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:14,730 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c34c44d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741873_1056 to 127.0.0.1:46751 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:14,919 WARN [Thread-1006 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:34:14,923 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5bfc1d35b4d94bfb with lease ID 0xdc375885b0dc55b7: from storage DS-6ef65769-1808-486b-900d-c1a8593676a8 node DatanodeRegistration(127.0.0.1:42707, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=39427, infoSecurePort=0, ipcPort=44779, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-27T03:34:14,923 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5bfc1d35b4d94bfb with lease ID 0xdc375885b0dc55b7: from storage DS-d56a45d7-5d5e-4eec-a8b2-9911548cdd60 node DatanodeRegistration(127.0.0.1:42707, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=39427, infoSecurePort=0, ipcPort=44779, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-27T03:34:15,000 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:15,729 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5c34c44d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741858_1041 to 127.0.0.1:45573 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-27T03:34:15,729 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@52f32063[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741883_1066 to 127.0.0.1:46751 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:16,280 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:16,306 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:17,001 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:17,728 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@52f32063[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:37315, datanodeUuid=127cfd7c-c7cb-42c1-91ff-5618fffaf028, infoPort=32925, infoSecurePort=0, ipcPort=36745, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741888_1071 to 127.0.0.1:41625 got
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:18,280 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:18,307 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:18,683 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-27T03:34:19,001 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:19,235 ERROR [FSHLog-0-hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData-prefix:66d3634da959,45173,1732678428709 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:19,235 WARN [FSHLog-0-hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData-prefix:66d3634da959,45173,1732678428709 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:19,235 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C45173%2C1732678428709:(num 1732678429019) roll requested
2024-11-27T03:34:19,237 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C45173%2C1732678428709.1732678459236
2024-11-27T03:34:19,244 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:19,244 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]) is bad.
2024-11-27T03:34:19,244 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741889_1072
2024-11-27T03:34:19,245 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45573,DS-09b9f7a9-ce97-4b33-8b2b-9296c90ce6a1,DISK]
2024-11-27T03:34:19,250 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:19,250 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:19,250 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:19,250 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:19,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:19,251 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678459236
2024-11-27T03:34:19,251 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:19,251 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:19,251 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:19,252 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39427:39427),(127.0.0.1/127.0.0.1:32925:32925)]
2024-11-27T03:34:19,252 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 is not closed yet, will try archiving it next time
2024-11-27T03:34:19,252 WARN [IPC Server handler 1 on default port 44897 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1014
2024-11-27T03:34:19,252 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 after 1ms
2024-11-27T03:34:20,281 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:20,307 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:22,282 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:22,308 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:23,255 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 after 4004ms
2024-11-27T03:34:24,283 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:24,309 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:26,283 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:26,309 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,150 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.1732678468150
2024-11-27T03:34:28,160 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41625
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037953256_22 at /127.0.0.1:55364 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data4]'}, localName='127.0.0.1:42707', datanodeUuid='4404529f-685b-4abe-8546-99a2e400169c', xmitsInProgress=0}:Exception transferring block BP-1346822850-172.17.0.3-1732678426327:blk_1073741891_1075 to mirror 127.0.0.1:41625
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:28,161 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42707,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK], DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:28,161 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741891_1075
2024-11-27T03:34:28,161 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1037953256_22 at /127.0.0.1:55364 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-27T03:34:28,161 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1037953256_22 at /127.0.0.1:55364 [Receiving block BP-1346822850-172.17.0.3-1732678426327:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:42707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55364 dst: /127.0.0.1:42707
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:34:28,162 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:28,168 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,168 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,168 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,168 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,169 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,169 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678452281 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678468150
2024-11-27T03:34:28,170 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32925:32925),(127.0.0.1/127.0.0.1:39427:39427)]
2024-11-27T03:34:28,170 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time
2024-11-27T03:34:28,170 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678452281 is not closed yet, will try archiving it next time
2024-11-27T03:34:28,170 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678450252 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs/66d3634da959%2C38459%2C1732678428873.1732678450252
2024-11-27T03:34:28,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741878_1061 (size=12100)
2024-11-27T03:34:28,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38459 {}] regionserver.HRegion(8855): Flush requested on 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:34:28,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-27T03:34:28,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/a834816ea5a1448291e4bd2433c6423c is 1080, key is row0013/info:/1732678468172/Put/seqid=0
2024-11-27T03:34:28,186 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077
java.net.ConnectException: Connection refused
	at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
	at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
	at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,186 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:42707,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad.
2024-11-27T03:34:28,186 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741893_1077
2024-11-27T03:34:28,187 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]
2024-11-27T03:34:28,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741894_1078 (size=9267)
2024-11-27T03:34:28,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741894_1078 (size=9267)
2024-11-27T03:34:28,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/a834816ea5a1448291e4bd2433c6423c
2024-11-27T03:34:28,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/a834816ea5a1448291e4bd2433c6423c as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/a834816ea5a1448291e4bd2433c6423c
2024-11-27T03:34:28,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/a834816ea5a1448291e4bd2433c6423c, entries=4, sequenceid=66, filesize=9.0 K
2024-11-27T03:34:28,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 30ae0c164627c581508f27dd9b02481f in 34ms, sequenceid=66, compaction requested=false
2024-11-27T03:34:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 30ae0c164627c581508f27dd9b02481f:
2024-11-27T03:34:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K
2024-11-27T03:34:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-27T03:34:28,211 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ced4acb6a02f4ddea65234032ea46fbf because midkey is the same as first or last row
2024-11-27T03:34:28,284 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,310 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled.
2024-11-27T03:34:28,310 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-27T03:34:28,397 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:34:28,398 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
	at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
	at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:34:28,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:28,399 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:28,399 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-27T03:34:28,399 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-27T03:34:28,399 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1274879632, stopped=false
2024-11-27T03:34:28,400 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,45173,1732678428709
2024-11-27T03:34:28,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:34:28,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:34:28,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:34:28,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:28,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:28,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:28,462 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:34:28,463 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:34:28,463 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
	at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
	at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:568)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:34:28,463 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:34:28,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:28,464 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:34:28,464 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,38459,1732678428873' *****
2024-11-27T03:34:28,464 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:34:28,464 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:34:28,464 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,40479,1732678430168' *****
2024-11-27T03:34:28,464 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:34:28,464 INFO [RS:0;66d3634da959:38459 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:34:28,464 INFO [RS:1;66d3634da959:40479 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:34:28,465 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:34:28,465 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:34:28,465 INFO [RS:0;66d3634da959:38459 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:34:28,465 INFO [RS:0;66d3634da959:38459 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:34:28,465 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(3091): Received CLOSE for 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:34:28,465 INFO [RS:1;66d3634da959:40479 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:34:28,465 INFO [RS:1;66d3634da959:40479 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:34:28,466 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,40479,1732678430168
2024-11-27T03:34:28,466 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,38459,1732678428873
2024-11-27T03:34:28,466 INFO [RS:0;66d3634da959:38459 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:34:28,466 INFO [RS:1;66d3634da959:40479 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:34:28,466 INFO [RS:0;66d3634da959:38459 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:38459.
2024-11-27T03:34:28,466 INFO [RS:1;66d3634da959:40479 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;66d3634da959:40479.
2024-11-27T03:34:28,466 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 30ae0c164627c581508f27dd9b02481f, disabling compactions & flushes
2024-11-27T03:34:28,466 DEBUG [RS:1;66d3634da959:40479 {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
	at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:34:28,466 DEBUG [RS:0;66d3634da959:38459 {}] client.AsyncConnectionImpl(264): Call stack:
	at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
	at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
	at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
	at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
	at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
	at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
	at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:34:28,466 DEBUG [RS:1;66d3634da959:40479 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:28,466 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:34:28,466 DEBUG [RS:0;66d3634da959:38459 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:28,466 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,40479,1732678430168; all regions closed.
2024-11-27T03:34:28,466 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:34:28,466 INFO [RS:0;66d3634da959:38459 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-27T03:34:28,466 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f. after waiting 0 ms
2024-11-27T03:34:28,466 INFO [RS:0;66d3634da959:38459 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-27T03:34:28,466 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.
2024-11-27T03:34:28,467 INFO [RS:0;66d3634da959:38459 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-27T03:34:28,467 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-27T03:34:28,467 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 30ae0c164627c581508f27dd9b02481f 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB
2024-11-27T03:34:28,467 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,467 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,467 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-27T03:34:28,467 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1325): Online Regions={30ae0c164627c581508f27dd9b02481f=TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f., 1588230740=hbase:meta,,1.1588230740}
2024-11-27T03:34:28,467 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,467 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 30ae0c164627c581508f27dd9b02481f
2024-11-27T03:34:28,467 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:34:28,468 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:34:28,468 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,468 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:34:28,468 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:28,468 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:34:28,468 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:34:28,468 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-11-27T03:34:28,468 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,468 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,468 ERROR [FSHLog-0-hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4-prefix:66d3634da959,38459,1732678428873.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:34:28,469 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 2024-11-27T03:34:28,469 WARN [FSHLog-0-hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4-prefix:66d3634da959,38459,1732678428873.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:28,469 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C38459%2C1732678428873.meta:.meta(num 1732678429952) roll requested 2024-11-27T03:34:28,469 WARN [IPC Server handler 1 on default port 44897 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 has not been closed. Lease recovery is in progress. 
RecoveryId = 1079 for block blk_1073741837_1015 2024-11-27T03:34:28,469 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38459%2C1732678428873.meta.1732678468469.meta 2024-11-27T03:34:28,469 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 after 0ms 2024-11-27T03:34:28,474 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/82c2cc7d575d4a21b20bcb91224d5f4c is 1080, key is row0016/info:/1732678468179/Put/seqid=0 2024-11-27T03:34:28,477 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:28,477 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:28,477 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:28,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:28,477 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:28,477 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678468469.meta 2024-11-27T03:34:28,478 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:28,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741896_1081 (size=13583) 2024-11-27T03:34:28,478 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46751,DS-c89509ba-24d7-4de6-84b6-ec35f438cb00,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:28,478 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta 2024-11-27T03:34:28,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741896_1081 (size=13583) 2024-11-27T03:34:28,479 WARN [IPC Server handler 2 on default port 44897 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1017 2024-11-27T03:34:28,479 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32925:32925),(127.0.0.1/127.0.0.1:39427:39427)] 2024-11-27T03:34:28,479 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta after 1ms 2024-11-27T03:34:28,479 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta is not closed yet, will try archiving it next time 2024-11-27T03:34:28,498 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/info/2b5dc9137cfc4a1b9c389b4ed05fb7a6 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f./info:regioninfo/1732678430675/Put/seqid=0 2024-11-27T03:34:28,500 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-27T03:34:28,500 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:42707,DS-6ef65769-1808-486b-900d-c1a8593676a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad. 2024-11-27T03:34:28,500 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741897_1083 2024-11-27T03:34:28,501 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK] 2024-11-27T03:34:28,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741898_1084 (size=7089) 2024-11-27T03:34:28,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741898_1084 (size=7089) 2024-11-27T03:34:28,506 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/info/2b5dc9137cfc4a1b9c389b4ed05fb7a6 2024-11-27T03:34:28,532 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/ns/b773588c841f45d6959d0469b5cff5a1 is 43, key is default/ns:d/1732678430041/Put/seqid=0 2024-11-27T03:34:28,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741899_1085 (size=5153) 2024-11-27T03:34:28,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741899_1085 (size=5153) 2024-11-27T03:34:28,537 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/ns/b773588c841f45d6959d0469b5cff5a1 2024-11-27T03:34:28,562 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/table/6631ab164f9e4756ad787c680dba84de is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732678430688/Put/seqid=0 2024-11-27T03:34:28,564 WARN [Thread-1070 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:34:28,565 WARN [Thread-1070 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1346822850-172.17.0.3-1732678426327:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK], DatanodeInfoWithStorage[127.0.0.1:37315,DS-e4dff13c-e3e9-4f29-8e3a-c793d09e5ef0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK]) is bad. 2024-11-27T03:34:28,565 WARN [Thread-1070 {}] hdfs.DataStreamer(1850): Abandoning BP-1346822850-172.17.0.3-1732678426327:blk_1073741900_1086 2024-11-27T03:34:28,565 WARN [Thread-1070 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41625,DS-beceb969-bf1b-4d1d-8062-b41575a8a890,DISK] 2024-11-27T03:34:28,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741901_1087 (size=5424) 2024-11-27T03:34:28,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741901_1087 (size=5424) 2024-11-27T03:34:28,570 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/table/6631ab164f9e4756ad787c680dba84de 2024-11-27T03:34:28,571 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 is not closed yet, will try archiving it next time 2024-11-27T03:34:28,572 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678452281 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs/66d3634da959%2C38459%2C1732678428873.1732678452281 2024-11-27T03:34:28,576 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/info/2b5dc9137cfc4a1b9c389b4ed05fb7a6 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/info/2b5dc9137cfc4a1b9c389b4ed05fb7a6 2024-11-27T03:34:28,583 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/info/2b5dc9137cfc4a1b9c389b4ed05fb7a6, entries=10, sequenceid=11, filesize=6.9 K 2024-11-27T03:34:28,584 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/ns/b773588c841f45d6959d0469b5cff5a1 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/ns/b773588c841f45d6959d0469b5cff5a1 2024-11-27T03:34:28,591 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/ns/b773588c841f45d6959d0469b5cff5a1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-27T03:34:28,592 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/.tmp/table/6631ab164f9e4756ad787c680dba84de as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/table/6631ab164f9e4756ad787c680dba84de 2024-11-27T03:34:28,598 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/table/6631ab164f9e4756ad787c680dba84de, entries=2, sequenceid=11, filesize=5.3 K 2024-11-27T03:34:28,599 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false 2024-11-27T03:34:28,604 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-27T03:34:28,605 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:34:28,605 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:34:28,605 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678468467Running coprocessor pre-close hooks at 1732678468467Disabling compacts and flushes for region at 1732678468467Disabling writes for close at 1732678468468 (+1 ms)Obtaining lock to block concurrent updates at 1732678468468Preparing flush snapshotting stores in 1588230740 at 1732678468468Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732678468469 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732678468479 (+10 ms)Flushing 1588230740/info: creating writer at 1732678468479Flushing 1588230740/info: appending metadata at 1732678468498 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732678468498Flushing 1588230740/ns: creating writer at 1732678468513 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732678468531 (+18 ms)Flushing 
1588230740/ns: closing flushed file at 1732678468531Flushing 1588230740/table: creating writer at 1732678468544 (+13 ms)Flushing 1588230740/table: appending metadata at 1732678468562 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732678468562Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fac6daa: reopening flushed file at 1732678468575 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3236ad18: reopening flushed file at 1732678468583 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30d6199e: reopening flushed file at 1732678468591 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 131ms, sequenceid=11, compaction requested=false at 1732678468599 (+8 ms)Writing region close event to WAL at 1732678468601 (+2 ms)Running coprocessor post-close hooks at 1732678468605 (+4 ms)Closed at 1732678468605 2024-11-27T03:34:28,605 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T03:34:28,668 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1351): Waiting on 30ae0c164627c581508f27dd9b02481f 2024-11-27T03:34:28,868 DEBUG [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1351): Waiting on 30ae0c164627c581508f27dd9b02481f 2024-11-27T03:34:28,881 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/82c2cc7d575d4a21b20bcb91224d5f4c 2024-11-27T03:34:28,895 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/.tmp/info/82c2cc7d575d4a21b20bcb91224d5f4c as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/82c2cc7d575d4a21b20bcb91224d5f4c 2024-11-27T03:34:28,902 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/82c2cc7d575d4a21b20bcb91224d5f4c, entries=8, sequenceid=77, filesize=13.3 K 2024-11-27T03:34:28,903 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 30ae0c164627c581508f27dd9b02481f in 436ms, sequenceid=77, compaction requested=true 2024-11-27T03:34:28,904 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/3b71555f0a044c67924c09450eb5175f, 
hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/f5187121ef86476d91e82d0cada8b4e2, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/5f17bffce3ac402db46b04eb5201693b, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ad81e109a0204c7d83a29cffc161a8f7] to archive 2024-11-27T03:34:28,905 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-27T03:34:28,907 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/3b71555f0a044c67924c09450eb5175f to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/3b71555f0a044c67924c09450eb5175f 2024-11-27T03:34:28,908 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/204c8f728b354b41a59559d83cafe283 2024-11-27T03:34:28,910 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/89ea6e31a08f4c26ad5060af22e3ff33 2024-11-27T03:34:28,911 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/f5187121ef86476d91e82d0cada8b4e2 to 
hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/f5187121ef86476d91e82d0cada8b4e2 2024-11-27T03:34:28,912 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/5f17bffce3ac402db46b04eb5201693b to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/5f17bffce3ac402db46b04eb5201693b 2024-11-27T03:34:28,914 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ad81e109a0204c7d83a29cffc161a8f7 to hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/info/ad81e109a0204c7d83a29cffc161a8f7 2024-11-27T03:34:28,914 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=66d3634da959:45173 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-27T03:34:28,914 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3b71555f0a044c67924c09450eb5175f=10347, 204c8f728b354b41a59559d83cafe283=12506, 89ea6e31a08f4c26ad5060af22e3ff33=17994, f5187121ef86476d91e82d0cada8b4e2=6027, 5f17bffce3ac402db46b04eb5201693b=6027, ad81e109a0204c7d83a29cffc161a8f7=6027] 2024-11-27T03:34:28,918 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/data/default/TestLogRolling-testLogRollOnDatanodeDeath/30ae0c164627c581508f27dd9b02481f/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-27T03:34:28,919 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f. 2024-11-27T03:34:28,919 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 30ae0c164627c581508f27dd9b02481f: Waiting for close lock at 1732678468466Running coprocessor pre-close hooks at 1732678468466Disabling compacts and flushes for region at 1732678468466Disabling writes for close at 1732678468466Obtaining lock to block concurrent updates at 1732678468467 (+1 ms)Preparing flush snapshotting stores in 30ae0c164627c581508f27dd9b02481f at 1732678468467Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1732678468467Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f. at 1732678468468 (+1 ms)Flushing 30ae0c164627c581508f27dd9b02481f/info: creating writer at 1732678468469 (+1 ms)Flushing 30ae0c164627c581508f27dd9b02481f/info: appending metadata at 1732678468473 (+4 ms)Flushing 30ae0c164627c581508f27dd9b02481f/info: closing flushed file at 1732678468473Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ebe72b8: reopening flushed file at 1732678468893 (+420 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 30ae0c164627c581508f27dd9b02481f in 436ms, sequenceid=77, compaction requested=true at 1732678468903 (+10 ms)Writing region close event to WAL at 1732678468915 (+12 ms)Running coprocessor post-close hooks at 1732678468919 (+4 ms)Closed at 1732678468919 2024-11-27T03:34:28,919 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732678430308.30ae0c164627c581508f27dd9b02481f. 2024-11-27T03:34:29,068 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,38459,1732678428873; all regions closed. 
2024-11-27T03:34:29,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:29,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:29,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:29,070 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:29,070 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:29,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741895_1080 (size=825) 2024-11-27T03:34:29,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741895_1080 (size=825) 2024-11-27T03:34:29,276 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-27T03:34:29,277 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-27T03:34:29,372 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:34:29,394 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-27T03:34:29,394 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-27T03:34:29,930 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a7e2eb0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42707, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=39427, infoSecurePort=0, ipcPort=44779, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741835_1011 to 127.0.0.1:41625 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:29,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741831_1007 (size=1321) 2024-11-27T03:34:30,062 INFO [master/66d3634da959:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-27T03:34:30,062 INFO [master/66d3634da959:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-27T03:34:30,280 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:34:30,929 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@a7e2eb0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42707, datanodeUuid=4404529f-685b-4abe-8546-99a2e400169c, infoPort=39427, infoSecurePort=0, ipcPort=44779, storageInfo=lv=-57;cid=testClusterID;nsid=887093274;c=1732678426327):Failed to transfer BP-1346822850-172.17.0.3-1732678426327:blk_1073741827_1003 to 127.0.0.1:41625 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:34:30,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741829_1005 (size=34) 2024-11-27T03:34:32,471 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 after 4002ms 2024-11-27T03:34:32,481 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta after 4003ms 2024-11-27T03:34:32,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741878_1061 (size=12100) 2024-11-27T03:34:32,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741832_1008 (size=32) 2024-11-27T03:34:32,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741828_1004 (size=1189) 2024-11-27T03:34:33,469 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-27T03:34:33,475 DEBUG [RS:1;66d3634da959:40479 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs 2024-11-27T03:34:33,475 INFO [RS:1;66d3634da959:40479 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C40479%2C1732678430168:(num 1732678430409) 2024-11-27T03:34:33,475 DEBUG [RS:1;66d3634da959:40479 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:34:33,475 INFO 
[RS:1;66d3634da959:40479 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:34:33,475 INFO [RS:1;66d3634da959:40479 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:34:33,475 INFO [RS:1;66d3634da959:40479 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-27T03:34:33,476 INFO [RS:1;66d3634da959:40479 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-27T03:34:33,476 INFO [RS:1;66d3634da959:40479 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-27T03:34:33,476 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-27T03:34:33,476 INFO [RS:1;66d3634da959:40479 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-27T03:34:33,476 INFO [RS:1;66d3634da959:40479 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:34:33,476 INFO [RS:1;66d3634da959:40479 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40479 2024-11-27T03:34:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,40479,1732678430168 2024-11-27T03:34:33,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:34:33,523 INFO [RS:1;66d3634da959:40479 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:34:33,524 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,40479,1732678430168] 2024-11-27T03:34:33,544 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,40479,1732678430168 already deleted, retry=false 2024-11-27T03:34:33,544 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,40479,1732678430168 expired; onlineServers=1 2024-11-27T03:34:33,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:34:33,634 INFO [RS:1;66d3634da959:40479 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:34:33,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40479-0x1017a5aa3ba0002, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:34:33,634 INFO [RS:1;66d3634da959:40479 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,40479,1732678430168; zookeeper connection closed. 2024-11-27T03:34:33,634 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@659ef3b3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@659ef3b3 2024-11-27T03:34:33,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:33,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:34:34,070 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-27T03:34:34,081 DEBUG [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs 2024-11-27T03:34:34,081 INFO [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C38459%2C1732678428873.meta:.meta(num 1732678468469) 2024-11-27T03:34:34,081 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:34,081 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:34,081 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:34,081 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:34,082 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:34:34,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741892_1076 (size=14682) 2024-11-27T03:34:34,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741892_1076 (size=14682) 2024-11-27T03:34:34,139 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-27T03:34:34,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:34,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:34,480 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-27T03:34:34,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:35,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-27T03:34:35,231 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:34:35,231 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-27T03:34:35,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:35,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:35,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741839_1021 (size=2431) 2024-11-27T03:34:36,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:36,480 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-27T03:34:36,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:37,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:37,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:38,481 INFO [regionserver/66d3634da959:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-27T03:34:38,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:38,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:34:38,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741836_1012 (size=76)
2024-11-27T03:34:38,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:34:39,082 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-27T03:34:39,092 DEBUG [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/oldWALs
2024-11-27T03:34:39,092 INFO [RS:0;66d3634da959:38459 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C38459%2C1732678428873:(num 1732678468150)
2024-11-27T03:34:39,092 DEBUG [RS:0;66d3634da959:38459 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:39,092 INFO [RS:0;66d3634da959:38459 {}] regionserver.LeaseManager(133): Closed leases
2024-11-27T03:34:39,092 INFO [RS:0;66d3634da959:38459 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-27T03:34:39,093 INFO [RS:0;66d3634da959:38459 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-27T03:34:39,093 INFO [RS:0;66d3634da959:38459 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:34:39,093 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
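The FileNotFoundException above is the client-side view of a NameNode RemoteException: the WAL under WALs/ had already been moved to oldWALs, so the isFileClosed probe finds nothing to check. A minimal Java sketch of the same probe (the NameNode URI and WAL path are taken from the log; everything else is illustrative, not HBase's code):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class IsFileClosedProbe {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44897"), conf)) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                Path wal = new Path("/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta");
                try {
                    System.out.println("closed=" + dfs.isFileClosed(wal));
                } catch (FileNotFoundException fnfe) {
                    // DFSClient has already unwrapped the NameNode's RemoteException
                    // (RemoteException.unwrapRemoteException, DFSClient.java:1812 in the
                    // trace above), so a deleted file surfaces here as a plain
                    // FileNotFoundException rather than an RPC-level error.
                    System.err.println("WAL already archived: " + fnfe.getMessage());
                }
            }
        }
    }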
2024-11-27T03:34:39,094 INFO [RS:0;66d3634da959:38459 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38459
2024-11-27T03:34:39,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:34:39,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,38459,1732678428873
2024-11-27T03:34:39,144 INFO [RS:0;66d3634da959:38459 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-27T03:34:39,146 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,38459,1732678428873]
2024-11-27T03:34:39,165 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,38459,1732678428873 already deleted, retry=false
2024-11-27T03:34:39,165 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,38459,1732678428873 expired; onlineServers=0
2024-11-27T03:34:39,165 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,45173,1732678428709' *****
2024-11-27T03:34:39,165 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-27T03:34:39,165 INFO [M:0;66d3634da959:45173 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:34:39,165 INFO [M:0;66d3634da959:45173 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-27T03:34:39,166 DEBUG [M:0;66d3634da959:45173 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-27T03:34:39,166 DEBUG [M:0;66d3634da959:45173 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-27T03:34:39,166 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
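The master reacts to the region server's death through ZooKeeper: the NodeDeleted event on the ephemeral /hbase/rs znode above is what triggers RegionServerTracker's expiration processing. A minimal sketch of watching such a znode with the plain ZooKeeper client (quorum string and znode path come from the log; the session timeout is an arbitrary choice for the example):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsNodeWatch {
        public static void main(String[] args) throws Exception {
            CountDownLatch deleted = new CountDownLatch(1);
            // Quorum taken from the log entries above; 30 s session timeout is arbitrary.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60759", 30_000, event -> {});
            String rsNode = "/hbase/rs/66d3634da959,38459,1732678428873";
            // exists() registers a one-shot watch that fires NodeDeleted when the
            // region server's ephemeral node disappears, as seen in the log.
            zk.exists(rsNode, (WatchedEvent event) -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    deleted.countDown();
                }
            });
            deleted.await();
            zk.close();
        }
    }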
2024-11-27T03:34:39,166 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678429233 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678429233,5,FailOnTimeoutGroup]
2024-11-27T03:34:39,166 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678429233 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678429233,5,FailOnTimeoutGroup]
2024-11-27T03:34:39,166 INFO [M:0;66d3634da959:45173 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-27T03:34:39,167 INFO [M:0;66d3634da959:45173 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:34:39,167 DEBUG [M:0;66d3634da959:45173 {}] master.HMaster(1795): Stopping service threads
2024-11-27T03:34:39,167 INFO [M:0;66d3634da959:45173 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-27T03:34:39,167 INFO [M:0;66d3634da959:45173 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:34:39,167 INFO [M:0;66d3634da959:45173 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-27T03:34:39,168 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-27T03:34:39,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-27T03:34:39,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:39,176 DEBUG [M:0;66d3634da959:45173 {}] zookeeper.ZKUtil(347): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-27T03:34:39,176 WARN [M:0;66d3634da959:45173 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-27T03:34:39,177 INFO [M:0;66d3634da959:45173 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/.lastflushedseqids
2024-11-27T03:34:39,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741902_1088 (size=130)
2024-11-27T03:34:39,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741902_1088 (size=130)
2024-11-27T03:34:39,188 INFO [M:0;66d3634da959:45173 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-27T03:34:39,188 INFO [M:0;66d3634da959:45173 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-27T03:34:39,188 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:34:39,188 INFO [M:0;66d3634da959:45173 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:34:39,188 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:34:39,188 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:34:39,188 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:34:39,188 INFO [M:0;66d3634da959:45173 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB
2024-11-27T03:34:39,202 DEBUG [M:0;66d3634da959:45173 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d7e8ab0f085a4101aad00b8ac9ea0c34 is 82, key is hbase:meta,,1/info:regioninfo/1732678429986/Put/seqid=0
2024-11-27T03:34:39,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741903_1089 (size=5672)
2024-11-27T03:34:39,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741903_1089 (size=5672)
2024-11-27T03:34:39,207 INFO [M:0;66d3634da959:45173 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d7e8ab0f085a4101aad00b8ac9ea0c34
2024-11-27T03:34:39,227 DEBUG [M:0;66d3634da959:45173 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c15bc231138e43a2993a43e2a922f9c8 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732678430694/Put/seqid=0
2024-11-27T03:34:39,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741904_1090 (size=6254)
2024-11-27T03:34:39,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741904_1090 (size=6254)
2024-11-27T03:34:39,232 INFO [M:0;66d3634da959:45173 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c15bc231138e43a2993a43e2a922f9c8
2024-11-27T03:34:39,237 INFO [M:0;66d3634da959:45173 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c15bc231138e43a2993a43e2a922f9c8
2024-11-27T03:34:39,252 DEBUG [M:0;66d3634da959:45173 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1729816468754bee924be740c342a6ec is 69, key is 66d3634da959,38459,1732678428873/rs:state/1732678429348/Put/seqid=0
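Each column family above is flushed to a file under the region's .tmp directory first and only later published into the store directory (the "Committing ... as ..." HRegionFileSystem entries further down). A write-then-rename sketch of that pattern using the generic Hadoop FileSystem API (the paths are invented for the example; this is not the HBase implementation itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenCommit {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            // Write the new file under .tmp first, so readers never see a partial file.
            Path tmp = new Path("/demo/store/.tmp/info/example-hfile");
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeBytes("flushed cells would go here");
            }
            // Then publish it with a rename, mirroring the log's
            // "HRegionFileSystem(442): Committing ... .tmp/info/... as ... info/...".
            Path committed = new Path("/demo/store/info/example-hfile");
            fs.mkdirs(committed.getParent());
            if (!fs.rename(tmp, committed)) {
                throw new java.io.IOException("commit rename failed for " + tmp);
            }
        }
    }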
2024-11-27T03:34:39,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:34:39,254 INFO [RS:0;66d3634da959:38459 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-27T03:34:39,254 INFO [RS:0;66d3634da959:38459 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,38459,1732678428873; zookeeper connection closed.
2024-11-27T03:34:39,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38459-0x1017a5aa3ba0001, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:34:39,255 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@647d68c9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@647d68c9
2024-11-27T03:34:39,255 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-11-27T03:34:39,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741905_1091 (size=5224)
2024-11-27T03:34:39,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741905_1091 (size=5224)
2024-11-27T03:34:39,257 INFO [M:0;66d3634da959:45173 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1729816468754bee924be740c342a6ec
2024-11-27T03:34:39,275 DEBUG [M:0;66d3634da959:45173 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/16308997f91f47279371d0b80e9d887a is 52, key is load_balancer_on/state:d/1732678430124/Put/seqid=0
2024-11-27T03:34:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741906_1092 (size=5056)
2024-11-27T03:34:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741906_1092 (size=5056)
2024-11-27T03:34:39,280 INFO [M:0;66d3634da959:45173 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/16308997f91f47279371d0b80e9d887a
2024-11-27T03:34:39,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:34:39,285 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d7e8ab0f085a4101aad00b8ac9ea0c34 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d7e8ab0f085a4101aad00b8ac9ea0c34
2024-11-27T03:34:39,289 INFO [M:0;66d3634da959:45173 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d7e8ab0f085a4101aad00b8ac9ea0c34, entries=8, sequenceid=60, filesize=5.5 K
2024-11-27T03:34:39,290 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c15bc231138e43a2993a43e2a922f9c8 as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c15bc231138e43a2993a43e2a922f9c8
2024-11-27T03:34:39,295 INFO [M:0;66d3634da959:45173 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c15bc231138e43a2993a43e2a922f9c8
2024-11-27T03:34:39,295 INFO [M:0;66d3634da959:45173 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c15bc231138e43a2993a43e2a922f9c8, entries=6, sequenceid=60, filesize=6.1 K
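The "Failed invocation" WARN above comes from RecoverLeaseFSUtils calling isFileClosed reflectively (note the GeneratedMethodAccessor/Method.invoke frames), so the underlying IOException("Filesystem closed") thrown by DFSClient.checkOpen arrives wrapped in an InvocationTargetException. A sketch of that call shape, assuming a DistributedFileSystem handle (not HBase's exact code):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ReflectiveIsFileClosed {
        static boolean isFileClosed(DistributedFileSystem dfs, Path p) throws Exception {
            // Look the method up reflectively, in the spirit of RecoverLeaseFSUtils,
            // which must cope with FileSystem implementations that may not expose it.
            Method m = dfs.getClass().getMethod("isFileClosed", Path.class);
            try {
                return (Boolean) m.invoke(dfs, p);
            } catch (InvocationTargetException ite) {
                // A closed DFSClient throws IOException("Filesystem closed") from
                // checkOpen(); reflection wraps it, which is exactly what produces
                // the WARN + InvocationTargetException entries in this log.
                System.err.println("Failed invocation for " + p + ": " + ite.getCause());
                return false;
            }
        }
    }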
2024-11-27T03:34:39,296 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1729816468754bee924be740c342a6ec as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1729816468754bee924be740c342a6ec
2024-11-27T03:34:39,301 INFO [M:0;66d3634da959:45173 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1729816468754bee924be740c342a6ec, entries=2, sequenceid=60, filesize=5.1 K
2024-11-27T03:34:39,302 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/16308997f91f47279371d0b80e9d887a as hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/16308997f91f47279371d0b80e9d887a
2024-11-27T03:34:39,307 INFO [M:0;66d3634da959:45173 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/16308997f91f47279371d0b80e9d887a, entries=1, sequenceid=60, filesize=4.9 K
2024-11-27T03:34:39,308 INFO [M:0;66d3634da959:45173 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=60, compaction requested=false
2024-11-27T03:34:39,309 INFO [M:0;66d3634da959:45173 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:34:39,309 DEBUG [M:0;66d3634da959:45173 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678479188Disabling compacts and flushes for region at 1732678479188Disabling writes for close at 1732678479188Obtaining lock to block concurrent updates at 1732678479188Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678479188Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1732678479189 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678479189Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678479189Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678479202 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678479202Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678479212 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678479226 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678479226Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678479237 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678479251 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678479251Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678479261 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678479274 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678479274Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62f607dd: reopening flushed file at 1732678479284 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ef2863b: reopening flushed file at 1732678479290 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10abea60: reopening flushed file at 1732678479295 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@600f2f5: reopening flushed file at 1732678479301 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=60, compaction requested=false at 1732678479308 (+7 ms)Writing region close event to WAL at 1732678479309 (+1 ms)Closed at 1732678479309
2024-11-27T03:34:39,310 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:39,310 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:39,310 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:39,310 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:39,310 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:34:39,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37315 is added to blk_1073741890_1073 (size=1045)
2024-11-27T03:34:39,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42707 is added to blk_1073741890_1073 (size=1045)
2024-11-27T03:34:39,482 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
    (same InvocationTargetException / "Filesystem closed" stack trace as at 03:34:39,280 above)
2024-11-27T03:34:39,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
    (same InvocationTargetException / "Filesystem closed" stack trace as at 03:34:39,280 above)
2024-11-27T03:34:40,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
    (same InvocationTargetException / "Filesystem closed" stack trace as at 03:34:39,280 above)
2024-11-27T03:34:40,483 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
    (same InvocationTargetException / "Filesystem closed" stack trace as at 03:34:39,280 above)
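Each of these WARN entries is one iteration of WAL lease recovery polling a file roughly once per second; the probes keep failing here only because the test has already closed the shared DFSClient. A sketch of the recoverLease-then-poll pattern against a live client (the timeout and interval values are illustrative, not HBase's defaults):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeasePoll {
        // Returns true once the file's lease is recovered and the file is closed.
        static boolean recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
            long deadline = System.currentTimeMillis() + 60_000; // illustrative timeout
            boolean recovered = dfs.recoverLease(wal); // ask the NameNode to start recovery
            while (!recovered && System.currentTimeMillis() < deadline) {
                Thread.sleep(1_000);                  // the log retries on a ~1 s cadence
                recovered = dfs.isFileClosed(wal);    // the probe that throws
            }                                         // "Filesystem closed" in this log
            return recovered;
        }
    }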
2024-11-27T03:34:40,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
    (same InvocationTargetException / "Filesystem closed" stack trace as at 03:34:39,280 above)
2024-11-27T03:34:40,738 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-27T03:34:40,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:40,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:34:41,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
    (same InvocationTargetException / "Filesystem closed" stack trace as at 03:34:39,280 above)
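The repeated FsDatasetImpl warnings come from metric collection dereferencing an executors map that datanode shutdown has already nulled. A defensive pattern that avoids this kind of NPE during teardown (illustrative only, not the Hadoop code):

    import java.util.Map;
    import java.util.concurrent.ExecutorService;

    public class SafeShutdownMetrics {
        private volatile Map<String, ExecutorService> executors; // nulled on shutdown

        int executorCount() {
            // Snapshot the field once so a concurrent shutdown cannot null it
            // between the check and the use, avoiding the NPE logged above.
            Map<String, ExecutorService> snapshot = executors;
            if (snapshot == null) {
                return 0; // shutting down; report an empty metric instead of throwing
            }
            return snapshot.values().size();
        }
    }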
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:41,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:34:41,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:42,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:42,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:34:42,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:43,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:43,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:34:43,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:44,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
2024-11-27T03:34:44,310 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
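
The ERROR above reports that the asynchronous WAL writer failed to close within the default 5-second window and names the knob that widens it. As a hedged illustration (the property string is quoted verbatim from the message; 30 is only an arbitrary larger value), the wait could be raised programmatically before the cluster starts, or equivalently in hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalShutdownWait {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // 5 is the wait the ERROR reports; 30 is illustrative only.
            conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
            System.out.println(conf.getInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 5));
        }
    }
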
2024-11-27T03:34:44,311 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-27T03:34:44,311 INFO [M:0;66d3634da959:45173 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-27T03:34:44,312 INFO [M:0;66d3634da959:45173 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45173 2024-11-27T03:34:44,312 INFO [M:0;66d3634da959:45173 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:34:44,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:34:44,465 INFO [M:0;66d3634da959:45173 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:34:44,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45173-0x1017a5aa3ba0000, quorum=127.0.0.1:60759, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:34:44,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3eb1ba79{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:44,473 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@117da513{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:44,473 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:44,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22c6c03b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:44,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped
o.e.j.s.ServletContextHandler@3e00c8cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:44,475 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:34:44,475 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-27T03:34:44,475 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1346822850-172.17.0.3-1732678426327 (Datanode Uuid 4404529f-685b-4abe-8546-99a2e400169c) service to localhost/127.0.0.1:44897 2024-11-27T03:34:44,475 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:34:44,476 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data3/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:44,477 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data4/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:44,477 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:34:44,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f0a2519{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:44,480 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68004957{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:44,480 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:44,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719bbb9b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:44,480 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fb911ed{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:44,481 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:34:44,482 WARN [BP-1346822850-172.17.0.3-1732678426327 heartbeating to localhost/127.0.0.1:44897 {}] datanode.BPServiceActor(925): Ending block pool
service for: Block pool BP-1346822850-172.17.0.3-1732678426327 (Datanode Uuid 127cfd7c-c7cb-42c1-91ff-5618fffaf028) service to localhost/127.0.0.1:44897 2024-11-27T03:34:44,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data5/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:44,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/cluster_51b2c1c2-5b5b-ece3-dd1d-ad477129e24d/data/data6/current/BP-1346822850-172.17.0.3-1732678426327 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:34:44,483 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:34:44,484 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-27T03:34:44,484 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:34:44,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:44,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c461833{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:34:44,490 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33e53d1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:34:44,490 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:34:44,490 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab5393f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:34:44,491 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@692b8c40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir/,STOPPED} 2024-11-27T03:34:44,497 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-27T03:34:44,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:44,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-27T03:34:44,534 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44897 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44897 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:44897 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f051cbf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f051cbf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f051cbf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$894/0x00007f051cbf4000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44897 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44897 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44897 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39527 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:44897 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44897 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44897 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44897 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39527 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=431 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=46 (was 93), ProcessCount=11 (was 11), AvailableMemoryMB=7995 (was 8437) 2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=431, MaxFileDescriptor=1048576, SystemLoadAverage=46, ProcessCount=11, AvailableMemoryMB=7995 2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
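
The StartMiniClusterOption printed above (one master, one region server, two datanodes, one ZooKeeper server) is assembled through a fluent builder before the next test begins. A minimal sketch of how a test might build and start the same topology, assuming the hbase-testing-util artifact matching this 3.0.0-beta-2-SNAPSHOT build is on the classpath:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public final class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Mirrors the logged option string: numMasters=1,
            // numRegionServers=1, numDataNodes=2, numZkServers=1.
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)
                .numRegionServers(1)
                .numDataNodes(2)
                .numZkServers(1)
                .build();
            util.startMiniCluster(option);
            try {
                // test logic would run against util.getConnection() here
            } finally {
                util.shutdownMiniCluster(); // yields the "Minicluster is down" line
            }
        }
    }

The ResourceChecker lines bracketing each test compare thread and file-descriptor counts before and after; the "Thread LEAK?" summary above flags the Close-WAL-Writer and Netty event-loop threads that are still winding down when the count is taken.
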
2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.log.dir so I do NOT create it in target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d 2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7fb6f5c3-836d-4c04-5033-6a7482733cc6/hadoop.tmp.dir so I do NOT create it in target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d 2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd, deleteOnExit=true 2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-27T03:34:44,541 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/test.cache.data in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.tmp.dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-27T03:34:44,542 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-27T03:34:44,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/nfs.dump.dir in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-27T03:34:44,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-27T03:34:44,555 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:34:44,908 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:34:44,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:34:44,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:34:44,914 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:34:44,914 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:34:44,914 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:34:44,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42369482{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:34:44,915 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@624b3986{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:34:45,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@771a779f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-32827-hadoop-hdfs-3_4_1-tests_jar-_-any-15915776983538716986/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:34:45,008 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23fcbb95{HTTP/1.1, (http/1.1)}{localhost:32827} 2024-11-27T03:34:45,008 INFO [Time-limited test {}] server.Server(415): Started @166052ms 2024-11-27T03:34:45,019 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:34:45,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-27T03:34:45,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:34:45,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:45,282 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:34:45,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:34:45,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:34:45,283 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:34:45,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d6935b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:34:45,284 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cb54bd6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:34:45,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:45,377 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4fb17e3e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-46437-hadoop-hdfs-3_4_1-tests_jar-_-any-17425491423705914442/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:45,377 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@393a832c{HTTP/1.1, (http/1.1)}{localhost:46437} 2024-11-27T03:34:45,377 INFO [Time-limited test {}] server.Server(415): Started @166422ms 2024-11-27T03:34:45,379 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:34:45,403 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:34:45,407 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:34:45,408 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:34:45,408 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:34:45,408 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:34:45,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b91ed3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:34:45,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a20a16b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:34:45,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:34:45,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5507dc7e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-36557-hadoop-hdfs-3_4_1-tests_jar-_-any-13190084251553116731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:34:45,505 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@220d6e9d{HTTP/1.1, (http/1.1)}{localhost:36557} 2024-11-27T03:34:45,505 INFO [Time-limited test {}] server.Server(415): Started @166549ms 2024-11-27T03:34:45,507 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:34:45,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:34:46,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:46,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:46,482 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data1/current/BP-73260669-172.17.0.3-1732678484566/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:46,483 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data2/current/BP-73260669-172.17.0.3-1732678484566/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:46,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:46,500 WARN [Thread-1176 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-27T03:34:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42fb67941efcac3a with lease ID 0xe51c58e67f4a7020: Processing first storage report for DS-3e3b894e-5647-4471-a448-9f351c76c63d from datanode DatanodeRegistration(127.0.0.1:46077, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=39443, infoSecurePort=0, ipcPort=41667, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566) 2024-11-27T03:34:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42fb67941efcac3a with lease ID 0xe51c58e67f4a7020: from storage DS-3e3b894e-5647-4471-a448-9f351c76c63d node DatanodeRegistration(127.0.0.1:46077, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=39443, infoSecurePort=0, ipcPort=41667, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42fb67941efcac3a with lease ID 0xe51c58e67f4a7020: Processing first storage report for DS-a3133cc5-c616-42ff-a520-e64096f5b4ef from datanode DatanodeRegistration(127.0.0.1:46077, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=39443, infoSecurePort=0, ipcPort=41667, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566) 2024-11-27T03:34:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42fb67941efcac3a with lease ID 0xe51c58e67f4a7020: from storage DS-a3133cc5-c616-42ff-a520-e64096f5b4ef node DatanodeRegistration(127.0.0.1:46077, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=39443, infoSecurePort=0, ipcPort=41667, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:46,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:46,630 WARN [Thread-1223 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data3/current/BP-73260669-172.17.0.3-1732678484566/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:46,630 WARN [Thread-1224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data4/current/BP-73260669-172.17.0.3-1732678484566/current, will proceed with Du for space computation calculation, 2024-11-27T03:34:46,648 WARN [Thread-1199 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:34:46,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5b59b881e6a0ed7a with lease ID 0xe51c58e67f4a7021: Processing first storage report for DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c from datanode DatanodeRegistration(127.0.0.1:40429, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=37893, infoSecurePort=0, ipcPort=45951, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566) 2024-11-27T03:34:46,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b59b881e6a0ed7a with lease ID 0xe51c58e67f4a7021: from storage DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c node DatanodeRegistration(127.0.0.1:40429, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=37893, infoSecurePort=0, ipcPort=45951, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:46,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5b59b881e6a0ed7a with lease ID 0xe51c58e67f4a7021: Processing first storage report for DS-7dc70174-d44f-4df2-9830-e30cafd70231 from datanode DatanodeRegistration(127.0.0.1:40429, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=37893, infoSecurePort=0, ipcPort=45951, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566) 2024-11-27T03:34:46,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b59b881e6a0ed7a with lease ID 0xe51c58e67f4a7021: from storage DS-7dc70174-d44f-4df2-9830-e30cafd70231 node DatanodeRegistration(127.0.0.1:40429, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=37893, infoSecurePort=0, ipcPort=45951, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:34:46,747 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d 2024-11-27T03:34:46,771 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/zookeeper_0, clientPort=58527, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-27T03:34:46,772 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58527 2024-11-27T03:34:46,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:46,774 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:46,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:34:46,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:34:46,787 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d with version=8 2024-11-27T03:34:46,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging 2024-11-27T03:34:46,789 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-27T03:34:46,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:34:46,790 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45307 2024-11-27T03:34:46,791 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45307 connecting to ZooKeeper ensemble=127.0.0.1:58527 2024-11-27T03:34:46,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:453070x0, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:34:46,840 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45307-0x1017a5b869d0000 connected 2024-11-27T03:34:46,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:46,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:46,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:34:46,944 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d, hbase.cluster.distributed=false 2024-11-27T03:34:46,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:34:46,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45307 2024-11-27T03:34:46,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45307 2024-11-27T03:34:46,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45307 2024-11-27T03:34:46,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45307 2024-11-27T03:34:46,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45307 2024-11-27T03:34:46,962 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T03:34:46,962 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:34:46,963 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37903 2024-11-27T03:34:46,964 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37903 connecting to ZooKeeper ensemble=127.0.0.1:58527 2024-11-27T03:34:46,965 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:46,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:46,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379030x0, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:34:46,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:34:46,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37903-0x1017a5b869d0001 connected 2024-11-27T03:34:46,975 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-27T03:34:46,976 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-27T03:34:46,976 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T03:34:46,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:34:46,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37903 2024-11-27T03:34:46,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37903 2024-11-27T03:34:46,978 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37903 2024-11-27T03:34:46,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37903 2024-11-27T03:34:46,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37903 2024-11-27T03:34:46,992 DEBUG [M:0;66d3634da959:45307 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:45307 2024-11-27T03:34:46,992 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,45307,1732678486788 2024-11-27T03:34:47,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:34:47,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:34:47,002 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,45307,1732678486788 2024-11-27T03:34:47,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T03:34:47,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:34:47,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:34:47,013 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T03:34:47,014 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,45307,1732678486788 from backup master directory 2024-11-27T03:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,45307,1732678486788 2024-11-27T03:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:34:47,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:34:47,023 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-27T03:34:47,023 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,45307,1732678486788 2024-11-27T03:34:47,030 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/hbase.id] with ID: 3ad1a8e6-0e21-4d97-aa49-0e5f39ede569 2024-11-27T03:34:47,030 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/.tmp/hbase.id 2024-11-27T03:34:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:34:47,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:34:47,037 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/.tmp/hbase.id]:[hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/hbase.id] 2024-11-27T03:34:47,047 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:34:47,047 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-27T03:34:47,049 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-27T03:34:47,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:34:47,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:34:47,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741827_1003 (size=196) 2024-11-27T03:34:47,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741827_1003 (size=196) 2024-11-27T03:34:47,067 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-27T03:34:47,069 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-27T03:34:47,069 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:34:47,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741828_1004 (size=1189) 2024-11-27T03:34:47,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741828_1004 (size=1189) 2024-11-27T03:34:47,079 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store 2024-11-27T03:34:47,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741829_1005 (size=34) 2024-11-27T03:34:47,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741829_1005 (size=34) 2024-11-27T03:34:47,086 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:34:47,086 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T03:34:47,086 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:34:47,086 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:34:47,086 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T03:34:47,086 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:34:47,086 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-27T03:34:47,086 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678487086Disabling compacts and flushes for region at 1732678487086Disabling writes for close at 1732678487086Writing region close event to WAL at 1732678487086Closed at 1732678487086
2024-11-27T03:34:47,087 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/.initializing
2024-11-27T03:34:47,087 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788
2024-11-27T03:34:47,089 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C45307%2C1732678486788, suffix=, logDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788, archiveDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/oldWALs, maxLogs=10
2024-11-27T03:34:47,089 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C45307%2C1732678486788.1732678487089
2024-11-27T03:34:47,094 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089
2024-11-27T03:34:47,094 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39443:39443),(127.0.0.1/127.0.0.1:37893:37893)]
2024-11-27T03:34:47,095 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:34:47,095 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:34:47,095 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,095 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,099 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,100 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-27T03:34:47,101 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-27T03:34:47,102 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:34:47,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:34:47,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:34:47,105 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,106 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:34:47,106 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,107 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:34:47,107 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,108 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,108 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,109 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,109 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,110 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:34:47,110 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:34:47,112 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:34:47,113 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771838, jitterRate=-0.018557995557785034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:34:47,113 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678487095Initializing all the Stores at 1732678487096 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487096Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678487099 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678487099Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678487099Cleaning up temporary data from old regions at 1732678487109 (+10 ms)Region opened successfully at 1732678487113 (+4 ms)
2024-11-27T03:34:47,113 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-27T03:34:47,116 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@729456ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:34:47,117 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:34:47,117 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:34:47,117 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:34:47,118 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:34:47,118 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-27T03:34:47,118 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-27T03:34:47,118 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:34:47,121 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:34:47,121 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:34:47,132 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:34:47,133 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:34:47,134 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:34:47,143 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:34:47,143 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:34:47,145 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:34:47,154 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:34:47,155 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:34:47,164 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:34:47,166 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:34:47,175 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:34:47,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:34:47,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:34:47,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,186 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,45307,1732678486788, sessionid=0x1017a5b869d0000, setting cluster-up flag (Was=false)
2024-11-27T03:34:47,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,238 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:34:47,240 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,45307,1732678486788
2024-11-27T03:34:47,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:34:47,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:34:47,291 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:34:47,295 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,45307,1732678486788
2024-11-27T03:34:47,298 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-27T03:34:47,301 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-27T03:34:47,302 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-27T03:34:47,302 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
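
[Editor's note: the two WARNs above come from a lease-recovery probe that calls DistributedFileSystem.isFileClosed reflectively; the InvocationTargetException simply wraps the real failure, IOException("Filesystem closed"), because the test's DFSClient had already been shut down. A minimal sketch of that pattern follows; the class and method names here are illustrative, not the actual RecoverLeaseFSUtils source.]

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedSketch {
      // Returns true if the NameNode reports the file as closed, false if
      // the probe itself failed (for example, the client was already closed).
      static boolean probeIsFileClosed(FileSystem fs, Path p) {
        try {
          // isFileClosed is looked up reflectively so the caller also works
          // against FileSystem implementations that lack the method.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (InvocationTargetException e) {
          // Reflection wraps the underlying failure; unwrap it for logging,
          // as the "Failed invocation" WARN above does.
          System.err.println("isFileClosed failed: " + e.getCause());
          return false;
        } catch (ReflectiveOperationException e) {
          return false; // method not present on this FileSystem
        }
      }
    }
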
2024-11-27T03:34:47,302 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,45307,1732678486788 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-27T03:34:47,305 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:34:47,305 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:34:47,305 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:34:47,305 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:34:47,305 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10
2024-11-27T03:34:47,305 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,306 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:34:47,306 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,308 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678517308
2024-11-27T03:34:47,308 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:34:47,309 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-27T03:34:47,309 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,310 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-27T03:34:47,310 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-27T03:34:47,310 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-27T03:34:47,310 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,311 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-27T03:34:47,311 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-27T03:34:47,311 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-27T03:34:47,311 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678487311,5,FailOnTimeoutGroup]
2024-11-27T03:34:47,312 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678487312,5,FailOnTimeoutGroup]
2024-11-27T03:34:47,312 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,312 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-27T03:34:47,312 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,312 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:34:47,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:34:47,320 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-27T03:34:47,320 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d
2024-11-27T03:34:47,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:34:47,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:34:47,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:34:47,328 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:34:47,329 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:34:47,329 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:34:47,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:34:47,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:34:47,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:34:47,332 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:34:47,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:34:47,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,334 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:34:47,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740
2024-11-27T03:34:47,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740
2024-11-27T03:34:47,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:34:47,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:34:47,337 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:34:47,337 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:34:47,339 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:34:47,340 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774386, jitterRate=-0.01531781256198883}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:34:47,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678487327Initializing all the Stores at 1732678487327Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487327Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487328 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678487328Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487328Cleaning up temporary data from old regions at 1732678487336 (+8 ms)Region opened successfully at 1732678487340 (+4 ms)
2024-11-27T03:34:47,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:34:47,340 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:34:47,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:34:47,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:34:47,340 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:34:47,341 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:34:47,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678487340Disabling compacts and flushes for region at 1732678487340Disabling writes for close at 1732678487340Writing region close event to WAL at 1732678487341 (+1 ms)Closed at 1732678487341
2024-11-27T03:34:47,342 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:34:47,342 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-27T03:34:47,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-27T03:34:47,343 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:34:47,344 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-27T03:34:47,382 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(746): ClusterId : 3ad1a8e6-0e21-4d97-aa49-0e5f39ede569
2024-11-27T03:34:47,382 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-27T03:34:47,393 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-27T03:34:47,393 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-27T03:34:47,404 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-27T03:34:47,404 DEBUG [RS:0;66d3634da959:37903 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35c11a6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:34:47,426 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:37903
2024-11-27T03:34:47,426 INFO [RS:0;66d3634da959:37903 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-27T03:34:47,426 INFO [RS:0;66d3634da959:37903 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-27T03:34:47,426 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(832): About to register with Master.
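
[Editor's note: the two "Opened ...; next sequenceid=2" entries above print desiredMaxFileSize together with a jitterRate. Assuming the split policy derives the value as base + (long)(base * jitterRate), both logged numbers are consistent with an inferred base of 786432 bytes; the sketch below just verifies that arithmetic. The class name and the base value are assumptions, not printed in the log.]

    public final class JitterCheck {
      public static void main(String[] args) {
        long base = 786432L; // inferred from the log, not logged directly
        // jitterRate values as printed for regions 1595e78... and 1588230740
        double[] rates = { -0.018557995557785034, -0.01531781256198883 };
        for (double r : rates) {
          // (long) cast truncates toward zero, so -14594.98 becomes -14594
          long desired = base + (long) (base * r);
          System.out.println(desired); // prints 771838, then 774386
        }
      }
    }
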
2024-11-27T03:34:47,427 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,45307,1732678486788 with port=37903, startcode=1732678486962
2024-11-27T03:34:47,427 DEBUG [RS:0;66d3634da959:37903 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-27T03:34:47,429 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51569, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-11-27T03:34:47,429 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45307 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,37903,1732678486962
2024-11-27T03:34:47,429 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45307 {}] master.ServerManager(517): Registering regionserver=66d3634da959,37903,1732678486962
2024-11-27T03:34:47,431 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d
2024-11-27T03:34:47,431 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39505
2024-11-27T03:34:47,431 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-27T03:34:47,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:34:47,438 DEBUG [RS:0;66d3634da959:37903 {}] zookeeper.ZKUtil(111): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,37903,1732678486962
2024-11-27T03:34:47,438 WARN [RS:0;66d3634da959:37903 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:34:47,439 INFO [RS:0;66d3634da959:37903 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:34:47,439 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962
2024-11-27T03:34:47,439 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,37903,1732678486962]
2024-11-27T03:34:47,442 INFO [RS:0;66d3634da959:37903 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-27T03:34:47,444 INFO [RS:0;66d3634da959:37903 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-27T03:34:47,445 INFO [RS:0;66d3634da959:37903 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:34:47,445 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
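
[Editor's note: the registration above ends with the master's RegionServerTracker observing a new ephemeral znode under /hbase/rs. The sketch below shows the underlying ZooKeeper primitive only; HBase itself goes through its ZKUtil wrapper, and the connect string and server name here are placeholders.]

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public final class EphemeralRsNodeSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder quorum address; the test above uses 127.0.0.1:58527.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> { });
        // An ephemeral znode is deleted automatically when the session dies,
        // which is how the tracker detects a regionserver crash.
        zk.create("/hbase/rs/example-host,16020,0",
            new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        Thread.sleep(1000);
        zk.close(); // node vanishes here, firing NodeChildrenChanged on /hbase/rs
      }
    }
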
2024-11-27T03:34:47,445 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-27T03:34:47,445 INFO [RS:0;66d3634da959:37903 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-27T03:34:47,446 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:34:47,446 DEBUG [RS:0;66d3634da959:37903 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:34:47,447 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
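
[Editor's note: each "Starting executor service" line above corresponds to a bounded thread pool with the printed corePoolSize/maxPoolSize. The sketch below builds an equivalent pool with the plain JDK; HBase's own executor.ExecutorService wrapper adds naming and tracking on top of this, so the sketch is an analogy, not its source.]

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class ExecutorSketch {
      public static void main(String[] args) {
        // corePoolSize == maxPoolSize == 1, as for RS_OPEN_REGION above;
        // the keep-alive plus core timeout lets the idle thread exit.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true);
        pool.execute(() -> System.out.println("open region task"));
        pool.shutdown();
      }
    }
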
2024-11-27T03:34:47,447 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,447 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,447 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,447 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,447 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,37903,1732678486962-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:34:47,460 INFO [RS:0;66d3634da959:37903 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-27T03:34:47,460 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,37903,1732678486962-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,460 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,460 INFO [RS:0;66d3634da959:37903 {}] regionserver.Replication(171): 66d3634da959,37903,1732678486962 started
2024-11-27T03:34:47,473 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:47,473 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,37903,1732678486962, RpcServer on 66d3634da959/172.17.0.3:37903, sessionid=0x1017a5b869d0001
2024-11-27T03:34:47,473 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-27T03:34:47,473 DEBUG [RS:0;66d3634da959:37903 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,37903,1732678486962
2024-11-27T03:34:47,473 DEBUG [RS:0;66d3634da959:37903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,37903,1732678486962'
2024-11-27T03:34:47,473 DEBUG [RS:0;66d3634da959:37903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-27T03:34:47,474 DEBUG [RS:0;66d3634da959:37903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-27T03:34:47,474 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-27T03:34:47,474 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-27T03:34:47,474 DEBUG [RS:0;66d3634da959:37903 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,37903,1732678486962
2024-11-27T03:34:47,474 DEBUG [RS:0;66d3634da959:37903 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,37903,1732678486962'
2024-11-27T03:34:47,474 DEBUG [RS:0;66d3634da959:37903 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-27T03:34:47,475 DEBUG [RS:0;66d3634da959:37903 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-27T03:34:47,475 DEBUG [RS:0;66d3634da959:37903 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-27T03:34:47,475 INFO [RS:0;66d3634da959:37903 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-27T03:34:47,475 INFO [RS:0;66d3634da959:37903 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-27T03:34:47,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:34:47,495 WARN [66d3634da959:45307 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-27T03:34:47,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:34:47,579 INFO [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C37903%2C1732678486962, suffix=, logDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962, archiveDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs, maxLogs=32
2024-11-27T03:34:47,581 INFO [RS:0;66d3634da959:37903 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:34:47,588 INFO [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:34:47,590 DEBUG [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39443:39443),(127.0.0.1/127.0.0.1:37893:37893)]
2024-11-27T03:34:47,745 DEBUG [66d3634da959:45307 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-27T03:34:47,746 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,37903,1732678486962
2024-11-27T03:34:47,750 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,37903,1732678486962, state=OPENING
2024-11-27T03:34:47,764 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-27T03:34:47,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:34:47,776 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:34:47,777 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:34:47,777 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:34:47,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,37903,1732678486962}]
2024-11-27T03:34:47,933 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-27T03:34:47,935 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48059, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-27T03:34:47,942 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-27T03:34:47,942 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:34:47,945 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C37903%2C1732678486962.meta, suffix=.meta, logDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962, archiveDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs, maxLogs=32
2024-11-27T03:34:47,945 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta
2024-11-27T03:34:47,950 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta
2024-11-27T03:34:47,954 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39443:39443),(127.0.0.1/127.0.0.1:37893:37893)]
2024-11-27T03:34:47,960 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:34:47,960 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-27T03:34:47,961 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-27T03:34:47,961 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-27T03:34:47,961 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-27T03:34:47,961 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:34:47,961 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-27T03:34:47,961 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-27T03:34:47,962 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:34:47,963 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:34:47,963 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:34:47,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:34:47,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:34:47,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:34:47,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:34:47,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:34:47,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:47,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:34:47,967 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:34:47,968 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740
2024-11-27T03:34:47,969 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740
2024-11-27T03:34:47,970 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:34:47,970 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:34:47,971 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:34:47,972 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:34:47,973 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724934, jitterRate=-0.07819890975952148}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:34:47,973 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-27T03:34:47,973 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732678487961Writing region info on filesystem at 1732678487961Initializing all the Stores at 1732678487962 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487962Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487962Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678487962Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678487962Cleaning up temporary data from old regions at 1732678487970 (+8 ms)Running coprocessor post-open hooks at 1732678487973 (+3 ms)Region opened successfully at 1732678487973
2024-11-27T03:34:47,974 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678487933
2024-11-27T03:34:47,976 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-27T03:34:47,976 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-27T03:34:47,977 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,37903,1732678486962
2024-11-27T03:34:47,978 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,37903,1732678486962, state=OPEN
2024-11-27T03:34:48,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:34:48,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:34:48,016 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,37903,1732678486962
2024-11-27T03:34:48,016 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:34:48,016 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:34:48,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-27T03:34:48,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,37903,1732678486962 in 239 msec
2024-11-27T03:34:48,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-27T03:34:48,025 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 680 msec
2024-11-27T03:34:48,027 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:34:48,027 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-27T03:34:48,028 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:34:48,029 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,37903,1732678486962, seqNum=-1]
2024-11-27T03:34:48,029 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:34:48,031 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41101, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:34:48,039 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 737 msec
2024-11-27T03:34:48,040 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678488040, completionTime=-1
2024-11-27T03:34:48,040 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-27T03:34:48,040 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678548042
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678608042
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45307,1732678486788-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45307,1732678486788-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45307,1732678486788-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:45307, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,042 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,044 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.023sec
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45307,1732678486788-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:34:48,046 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45307,1732678486788-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-27T03:34:48,048 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-27T03:34:48,048 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-27T03:34:48,048 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,45307,1732678486788-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:34:48,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da0cc4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:34:48,082 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,45307,-1 for getting cluster id
2024-11-27T03:34:48,082 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-27T03:34:48,084 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3ad1a8e6-0e21-4d97-aa49-0e5f39ede569'
2024-11-27T03:34:48,085 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-27T03:34:48,085 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3ad1a8e6-0e21-4d97-aa49-0e5f39ede569"
2024-11-27T03:34:48,085 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@97be0d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:34:48,085 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,45307,-1]
2024-11-27T03:34:48,086 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-27T03:34:48,086 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:34:48,088 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33494, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-27T03:34:48,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bfe6512, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:34:48,090 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:34:48,091 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,37903,1732678486962, seqNum=-1]
2024-11-27T03:34:48,092 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:34:48,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58828, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:34:48,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=66d3634da959,45307,1732678486788
2024-11-27T03:34:48,096 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:34:48,100 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-27T03:34:48,100 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart
2024-11-27T03:34:48,101 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2
2024-11-27T03:34:48,101 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-27T03:34:48,102 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 66d3634da959,45307,1732678486788
2024-11-27T03:34:48,102 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2450b2c5
2024-11-27T03:34:48,102 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-27T03:34:48,105 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33508, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-27T03:34:48,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-27T03:34:48,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-11-27T03:34:48,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:34:48,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart
2024-11-27T03:34:48,109 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION
2024-11-27T03:34:48,110 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:48,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4
2024-11-27T03:34:48,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:34:48,111 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-27T03:34:48,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741835_1011 (size=395)
2024-11-27T03:34:48,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741835_1011 (size=395)
2024-11-27T03:34:48,121 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7635b95fa1ac59cded4d75ff52d8a71e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d
2024-11-27T03:34:48,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40429 is added to blk_1073741836_1012 (size=78)
2024-11-27T03:34:48,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46077 is added to blk_1073741836_1012 (size=78)
2024-11-27T03:34:48,129 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:34:48,129 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 7635b95fa1ac59cded4d75ff52d8a71e, disabling compactions & flushes
2024-11-27T03:34:48,129 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,129 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,129 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e. after waiting 0 ms
2024-11-27T03:34:48,129 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,129 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,129 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7635b95fa1ac59cded4d75ff52d8a71e: Waiting for close lock at 1732678488129Disabling compacts and flushes for region at 1732678488129Disabling writes for close at 1732678488129Writing region close event to WAL at 1732678488129Closed at 1732678488129
2024-11-27T03:34:48,131 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META
2024-11-27T03:34:48,131 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732678488131"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678488131"}]},"ts":"1732678488131"}
2024-11-27T03:34:48,133 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-27T03:34:48,134 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-27T03:34:48,135 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678488135"}]},"ts":"1732678488135"}
2024-11-27T03:34:48,137 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta
2024-11-27T03:34:48,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7635b95fa1ac59cded4d75ff52d8a71e, ASSIGN}]
2024-11-27T03:34:48,138 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7635b95fa1ac59cded4d75ff52d8a71e, ASSIGN
2024-11-27T03:34:48,139 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7635b95fa1ac59cded4d75ff52d8a71e, ASSIGN; state=OFFLINE, location=66d3634da959,37903,1732678486962; forceNewPlan=false, retain=false
2024-11-27T03:34:48,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:34:48,291 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7635b95fa1ac59cded4d75ff52d8a71e, regionState=OPENING, regionLocation=66d3634da959,37903,1732678486962
2024-11-27T03:34:48,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:34:48,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7635b95fa1ac59cded4d75ff52d8a71e, ASSIGN because future has completed
2024-11-27T03:34:48,299 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7635b95fa1ac59cded4d75ff52d8a71e, server=66d3634da959,37903,1732678486962}]
2024-11-27T03:34:48,463 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,463 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7635b95fa1ac59cded4d75ff52d8a71e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:34:48,464 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,464 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:34:48,464 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,464 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,466 INFO [StoreOpener-7635b95fa1ac59cded4d75ff52d8a71e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,468 INFO [StoreOpener-7635b95fa1ac59cded4d75ff52d8a71e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7635b95fa1ac59cded4d75ff52d8a71e columnFamilyName info
2024-11-27T03:34:48,468 DEBUG [StoreOpener-7635b95fa1ac59cded4d75ff52d8a71e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:34:48,469 INFO [StoreOpener-7635b95fa1ac59cded4d75ff52d8a71e-1 {}] regionserver.HStore(327): Store=7635b95fa1ac59cded4d75ff52d8a71e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:34:48,469 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,470 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,471 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,471 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,471 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,473 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,476 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:34:48,476 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7635b95fa1ac59cded4d75ff52d8a71e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742606, jitterRate=-0.05572836101055145}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-27T03:34:48,476 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:34:48,477 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7635b95fa1ac59cded4d75ff52d8a71e: Running coprocessor pre-open hook at 1732678488464Writing region info on filesystem at 1732678488464Initializing all the Stores at 1732678488466 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678488466Cleaning up temporary data from old regions at 1732678488471 (+5 ms)Running coprocessor post-open hooks at 1732678488476 (+5 ms)Region opened successfully at 1732678488477 (+1 ms)
2024-11-27T03:34:48,478 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e., pid=6, masterSystemTime=1732678488453
2024-11-27T03:34:48,481 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,481 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:48,482 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7635b95fa1ac59cded4d75ff52d8a71e, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,37903,1732678486962
2024-11-27T03:34:48,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7635b95fa1ac59cded4d75ff52d8a71e, server=66d3634da959,37903,1732678486962 because future has completed
2024-11-27T03:34:48,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-27T03:34:48,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7635b95fa1ac59cded4d75ff52d8a71e, server=66d3634da959,37903,1732678486962 in 186 msec
2024-11-27T03:34:48,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-27T03:34:48,491 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=7635b95fa1ac59cded4d75ff52d8a71e, ASSIGN in 351 msec
2024-11-27T03:34:48,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:48,491 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T03:34:48,492 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678488491"}]},"ts":"1732678488491"} 2024-11-27T03:34:48,494 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-27T03:34:48,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T03:34:48,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 389 msec 2024-11-27T03:34:48,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:49,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:49,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:49,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:49,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:50,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:50,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:50,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:50,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:50,738 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-27T03:34:50,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,767 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:50,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:34:51,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:51,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:51,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:51,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:52,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:52,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:52,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:52,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:53,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:53,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:53,443 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-27T03:34:53,445 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-27T03:34:53,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:34:53,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
2024-11-27T03:34:54,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:54,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:34:54,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:34:54,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:34:55,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-27T03:34:55,231 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-27T03:34:55,233 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-27T03:34:55,233 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-11-27T03:34:55,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-27T03:34:55,235 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-27T03:34:55,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-27T03:34:55,235 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-27T03:34:55,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:55,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:34:55,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:34:55,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:34:56,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:56,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:34:56,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:34:56,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:34:57,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:57,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:34:57,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:34:57,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:34:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45307 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:34:58,166 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-27T03:34:58,166 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart,, for max=2147483647 with caching=100
2024-11-27T03:34:58,175 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-27T03:34:58,175 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:34:58,181 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e., hostname=66d3634da959,37903,1732678486962, seqNum=2]
2024-11-27T03:34:58,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:58,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:34:58,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:34:58,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:34:59,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:34:59,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:34:59,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:34:59,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:00,185 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:35:00,186 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010
java.io.IOException: Bad response ERROR for BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:00,186 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:00,186 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:00,187 WARN [DataStreamer for file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta block BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]) is bad.
2024-11-27T03:35:00,187 WARN [DataStreamer for file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 block BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]) is bad.
2024-11-27T03:35:00,187 WARN [DataStreamer for file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 block BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK], DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40429,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]) is bad.
2024-11-27T03:35:00,187 WARN [PacketResponder: BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40429] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,187 WARN [PacketResponder: BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40429] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,187 WARN [PacketResponder: BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40429] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Connection reset by peer
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:60726 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60726 dst: /127.0.0.1:46077
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:60720 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60720 dst: /127.0.0.1:46077
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1660988170_22 at /127.0.0.1:60682 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60682 dst: /127.0.0.1:46077
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1660988170_22 at /127.0.0.1:41312 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41312 dst: /127.0.0.1:40429
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,189 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:41344 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41344 dst: /127.0.0.1:40429
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,190 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:41332 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40429:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41332 dst: /127.0.0.1:40429
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5507dc7e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:00,227 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@220d6e9d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:35:00,227 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:35:00,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a20a16b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:35:00,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b91ed3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED}
2024-11-27T03:35:00,229 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T03:35:00,229 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:35:00,229 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:35:00,229 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-73260669-172.17.0.3-1732678484566 (Datanode Uuid f85354ed-74a4-4d2e-98af-7ef9ac6645a0) service to localhost/127.0.0.1:39505
2024-11-27T03:35:00,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data3/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:00,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data4/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:00,230 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:35:00,243 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:35:00,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:35:00,246 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:35:00,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:35:00,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:35:00,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e3b369c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:35:00,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d61cf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:35:00,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:00,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:00,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a397072{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-38341-hadoop-hdfs-3_4_1-tests_jar-_-any-5717523438205630570/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:00,339 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5bc55163{HTTP/1.1, (http/1.1)}{localhost:38341}
2024-11-27T03:35:00,339 INFO [Time-limited test {}] server.Server(415): Started @181383ms
2024-11-27T03:35:00,340 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:35:00,357 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:00,357 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:00,357 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:00,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:56420 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56420 dst: /127.0.0.1:46077
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:56440 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56440 dst: /127.0.0.1:46077
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,357 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1660988170_22 at /127.0.0.1:56422 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56422 dst: /127.0.0.1:46077
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-27T03:35:00,361 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4fb17e3e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:00,362 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@393a832c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:35:00,362 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:35:00,362 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cb54bd6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:35:00,362 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d6935b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED}
2024-11-27T03:35:00,363 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:35:00,363 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T03:35:00,363 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-73260669-172.17.0.3-1732678484566 (Datanode Uuid 6e5d0235-d396-4191-ae35-0410e7e037cc) service to localhost/127.0.0.1:39505
2024-11-27T03:35:00,363 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:35:00,364 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data1/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:00,364 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data2/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:00,364 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:35:00,372 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:35:00,375 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:35:00,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:35:00,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:35:00,378 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-27T03:35:00,378 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29d5ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:35:00,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7939cb3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:35:00,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1dad3af2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-35603-hadoop-hdfs-3_4_1-tests_jar-_-any-4147888835625062278/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:00,470 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4307cd3{HTTP/1.1, (http/1.1)}{localhost:35603}
2024-11-27T03:35:00,471 INFO [Time-limited test {}] server.Server(415): Started @181515ms
2024-11-27T03:35:00,472 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:35:00,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:00,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:00,904 WARN [Thread-1347 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:35:00,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaee8bf8688a1904c with lease ID 0xe51c58e67f4a7022: from storage DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c node DatanodeRegistration(127.0.0.1:35307, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=39571, infoSecurePort=0, ipcPort=43611, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:35:00,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaee8bf8688a1904c with lease ID 0xe51c58e67f4a7022: from storage DS-7dc70174-d44f-4df2-9830-e30cafd70231 node DatanodeRegistration(127.0.0.1:35307, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=39571, infoSecurePort=0, ipcPort=43611, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:35:01,011 WARN [Thread-1367 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:35:01,013 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c440a035bd095ee with lease ID 0xe51c58e67f4a7023: from storage DS-3e3b894e-5647-4471-a448-9f351c76c63d node DatanodeRegistration(127.0.0.1:45437, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=36587, infoSecurePort=0, ipcPort=42771, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:35:01,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3c440a035bd095ee with lease ID 0xe51c58e67f4a7023: from storage DS-a3133cc5-c616-42ff-a520-e64096f5b4ef node DatanodeRegistration(127.0.0.1:45437, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=36587, infoSecurePort=0, ipcPort=42771, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:35:01,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:01,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-27T03:35:01,491 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted
2024-11-27T03:35:01,497 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002
2024-11-27T03:35:01,499 ERROR [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:01,500 WARN [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:01,500 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C37903%2C1732678486962:(num 1732678487581) roll requested
2024-11-27T03:35:01,500 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:01,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:01,507 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 newFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:01,507 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:01,507 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:01,507 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:01,507 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:01,508 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:01,508 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:01,508 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:01,508 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
	at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:01,508 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:35:01,509 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36587:36587),(127.0.0.1/127.0.0.1:39571:39571)]
2024-11-27T03:35:01,509 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 is not closed yet, will try archiving it next time
2024-11-27T03:35:01,509 WARN [IPC Server handler 4 on default port 39505 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014
2024-11-27T03:35:01,509 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 after 1ms
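The attempt=0 entry just above, together with the attempt=1 success logged about four seconds later, shows the recover-then-poll pattern: the client asks the NameNode to start lease recovery, the NameNode answers that recovery is still in progress (RecoveryId = 1017), and the caller retries until recoverLease reports the file closed. A much-simplified sketch of that loop, assuming a DistributedFileSystem handle is available; the pause lengths are illustrative, not HBase's actual RecoverLeaseFSUtils backoff schedule:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
    static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        long start = System.currentTimeMillis();
        for (int attempt = 0; ; attempt++) {
            // Asks the NameNode to start (or re-check) lease recovery on the
            // file; returns true once the file is closed and the lease freed.
            if (dfs.recoverLease(wal)) {
                System.out.println("Recovered lease, attempt=" + attempt
                    + " after " + (System.currentTimeMillis() - start) + "ms");
                return;
            }
            // Block recovery is asynchronous on the NameNode side, so poll
            // with a pause between attempts (illustrative values).
            Thread.sleep(attempt == 0 ? 1000L : 4000L);
        }
    }
}
```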
2024-11-27T03:35:01,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:02,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45437 is added to blk_1073741833_1017 (size=1632)
2024-11-27T03:35:02,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:02,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:02,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:02,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:03,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:03,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:03,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:03,516 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003
2024-11-27T03:35:03,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:03,910 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009
	at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-27T03:35:04,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:04,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:04,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:04,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:05,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:05,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:05,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:35:05,511 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 after 4003ms
2024-11-27T03:35:05,521 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:05,522 WARN [DataStreamer for file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 block BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45437,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK], DatanodeInfoWithStorage[127.0.0.1:35307,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45437,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]) is bad.
2024-11-27T03:35:05,522 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:42628 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45437:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42628 dst: /127.0.0.1:45437
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
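The three entries above are one pipeline failure seen from both sides: the client's ResponseProcessor hits EOF when datanode 127.0.0.1:45437 disappears, marks that node bad, and rebuilds the write pipeline from the survivors, while the datanodes log the broken WRITE_BLOCK stream as DataXceiver errors. Whether the client may also request a replacement datanode during such recovery is governed by the replace-datanode-on-failure client settings; a sketch of those knobs, assuming they apply to a mini-cluster like this one (the values shown are illustrative, not what this test actually used):

```java
import org.apache.hadoop.conf.Configuration;

public class PipelineFailureKnobs {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Master switch for datanode replacement during pipeline recovery.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces when enough datanodes are available; on a
        // two-datanode cluster, NEVER avoids hopeless replacement attempts
        // and keeps writing to the surviving node instead.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        System.out.println(
            conf.get("dfs.client.block.write.replace-datanode-on-failure.policy"));
    }
}
```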
2024-11-27T03:35:05,522 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:43028 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35307:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43028 dst: /127.0.0.1:35307 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:35:05,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:05,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1dad3af2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:35:05,581 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4307cd3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:35:05,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:35:05,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7939cb3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:35:05,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29d5ccf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED} 2024-11-27T03:35:05,584 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-27T03:35:05,584 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:35:05,584 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:35:05,584 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-73260669-172.17.0.3-1732678484566 (Datanode Uuid 6e5d0235-d396-4191-ae35-0410e7e037cc) service to localhost/127.0.0.1:39505 2024-11-27T03:35:05,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data1/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:35:05,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data2/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:35:05,586 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:35:05,594 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:35:05,597 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:35:05,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:35:05,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:35:05,598 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:35:05,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58c81822{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:35:05,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a15b38f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:35:05,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@647c40ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-34699-hadoop-hdfs-3_4_1-tests_jar-_-any-14130949296868013688/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:35:05,693 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38ca15c7{HTTP/1.1, (http/1.1)}{localhost:34699} 2024-11-27T03:35:05,693 INFO [Time-limited test {}] server.Server(415): Started @186737ms 2024-11-27T03:35:05,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:35:05,713 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:35:05,713 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-18935253_22 at /127.0.0.1:51132 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35307:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51132 dst: /127.0.0.1:35307 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
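A note on the two DataXceiver errors above ("Premature EOF from inputStream" and ClosedChannelException): this is the datanode's view of a write pipeline whose client went away mid-packet. PacketReceiver pulls each packet off the socket with a read-fully loop, and an early end-of-stream becomes an IOException. Below is a minimal sketch of that read-fully contract (same behavior as org.apache.hadoop.io.IOUtils.readFully, not the Hadoop source itself):

```java
import java.io.IOException;
import java.io.InputStream;

public final class ReadFullySketch {
  // Block until exactly `len` bytes arrive, and fail if the stream ends
  // first. When a DFS client dies mid-packet (e.g. during the datanode
  // restarts in this test), the datanode's packet reader hits the EOF
  // branch and logs "Premature EOF from inputStream".
  static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException {
    int done = 0;
    while (done < len) {
      int n = in.read(buf, off + done, len - done);
      if (n == -1) {
        throw new IOException("Premature EOF from inputStream");
      }
      done += n;
    }
  }
}
```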
2024-11-27T03:35:05,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a397072{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:35:05,717 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5bc55163{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:35:05,717 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:35:05,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d61cf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:35:05,717 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e3b369c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED} 2024-11-27T03:35:05,718 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:35:05,718 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-73260669-172.17.0.3-1732678484566 (Datanode Uuid f85354ed-74a4-4d2e-98af-7ef9ac6645a0) service to localhost/127.0.0.1:39505 2024-11-27T03:35:05,718 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-27T03:35:05,718 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:35:05,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data3/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:35:05,719 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data4/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:35:05,719 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:35:05,731 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:35:05,735 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:35:05,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:35:05,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:35:05,736 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:35:05,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7202b0aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:35:05,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66269315{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:35:05,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5e666703{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/java.io.tmpdir/jetty-localhost-40017-hadoop-hdfs-3_4_1-tests_jar-_-any-2874018660070056103/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:35:05,827 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2dd41fe4{HTTP/1.1, (http/1.1)}{localhost:40017} 2024-11-27T03:35:05,827 INFO [Time-limited test {}] server.Server(415): Started @186871ms 2024-11-27T03:35:05,828 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:35:06,209 WARN [Thread-1421 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:35:06,212 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4b9a1f54c7cd536 with lease ID 0xe51c58e67f4a7024: from storage DS-3e3b894e-5647-4471-a448-9f351c76c63d node DatanodeRegistration(127.0.0.1:45331, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=45121, infoSecurePort=0, ipcPort=34669, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:06,212 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4b9a1f54c7cd536 with lease ID 0xe51c58e67f4a7024: from storage DS-a3133cc5-c616-42ff-a520-e64096f5b4ef node DatanodeRegistration(127.0.0.1:45331, datanodeUuid=6e5d0235-d396-4191-ae35-0410e7e037cc, infoPort=45121, infoSecurePort=0, ipcPort=34669, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:06,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:06,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:06,367 WARN [Thread-1441 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:35:06,369 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2a774c6cb7808eb with lease ID 0xe51c58e67f4a7025: from storage DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c node DatanodeRegistration(127.0.0.1:44135, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=36875, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:06,370 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2a774c6cb7808eb with lease ID 0xe51c58e67f4a7025: from storage DS-7dc70174-d44f-4df2-9830-e30cafd70231 node DatanodeRegistration(127.0.0.1:44135, datanodeUuid=f85354ed-74a4-4d2e-98af-7ef9ac6645a0, infoPort=36875, infoSecurePort=0, ipcPort=35063, storageInfo=lv=-57;cid=testClusterID;nsid=2111020296;c=1732678484566), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:06,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:06,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:06,851 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-27T03:35:06,855 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-27T03:35:06,857 ERROR [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35307,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
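A note on "All datanodes [...] are bad. Aborting..." above: this is DataStreamer giving up on pipeline recovery, because every datanode in this block's pipeline failed while the test was restarting datanodes and there is no replica left to rebuild from. As the next events show, the WAL layer reacts by rolling to a new file on a fresh pipeline. The following is a hypothetical sketch of that roll-on-failure pattern; rollOnFailure, the file-naming scheme, and the single-edit replay are illustrative assumptions, not the AbstractFSWAL implementation:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class RollOnPipelineFailure {
  // Append-and-sync with fallback: if the current pipeline is dead, close
  // the old writer best-effort (the log shows that close failing too) and
  // continue in a brand-new file, replaying the edit that failed.
  static FSDataOutputStream rollOnFailure(FileSystem fs, Path oldWal,
      FSDataOutputStream out, byte[] pendingEdit) throws IOException {
    try {
      out.write(pendingEdit);
      out.hsync();                     // durable on the current pipeline
      return out;                      // pipeline still healthy, keep writer
    } catch (IOException pipelineDead) {
      try {
        out.close();                   // "close old writer failed." is this path
      } catch (IOException ignored) {
        // lease recovery on the namenode will finish closing the file
      }
      Path newWal = new Path(oldWal.getParent(),
          oldWal.getName() + "." + System.currentTimeMillis());
      FSDataOutputStream fresh = fs.create(newWal, false);
      fresh.write(pendingEdit);        // replay the lost edit on the new pipeline
      fresh.hsync();
      return fresh;
    }
  }
}
```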
2024-11-27T03:35:06,858 WARN [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35307,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:35:06,858 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C37903%2C1732678486962:(num 1732678501500) roll requested 2024-11-27T03:35:06,859 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.1732678506858 2024-11-27T03:35:06,869 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 newFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 2024-11-27T03:35:06,870 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:35:06,870 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:35:06,870 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:35:06,871 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:35:06,871 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:35:06,871 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 2024-11-27T03:35:06,871 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35307,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
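A note on the trailer failure above: the trailer write died on the same dead pipeline, and the events that follow show the close of the old writer failing too, leaving the old WAL file open from the namenode's point of view. The fallback is namenode-driven lease recovery ("Lease recovery is in progress. RecoveryId = 1020", then "Failed to recover lease, attempt=0"). Below is a minimal sketch of the retry loop over the two public HDFS client calls involved; the real RecoverLeaseFSUtils adds timeouts, pauses between attempts, and the reflective isFileClosed probe seen in the stack traces:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Ask the namenode to recover the lease until the file is closed.
  // recoverLease() returns true once the file has been closed; between
  // attempts, isFileClosed() is a cheap probe for recovery finishing
  // in the background.
  static boolean recoverLease(FileSystem fs, Path wal, long pauseMs, int maxAttempts)
      throws IOException, InterruptedException {
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (dfs.recoverLease(wal)) {
        return true;                  // namenode closed the file; safe to read it
      }
      if (dfs.isFileClosed(wal)) {
        return true;                  // recovery completed between attempts
      }
      Thread.sleep(pauseMs);          // "Failed to recover lease, attempt=N" then retry
    }
    return false;
  }
}
```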
2024-11-27T03:35:06,872 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35307,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:35:06,872 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 2024-11-27T03:35:06,872 WARN [IPC Server handler 2 on default port 39505 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-27T03:35:06,873 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36875:36875),(127.0.0.1/127.0.0.1:45121:45121)] 2024-11-27T03:35:06,873 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 is not closed yet, will try archiving it next time 2024-11-27T03:35:06,873 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 after 1ms 2024-11-27T03:35:07,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:07,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:07,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:07,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:08,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:08,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:08,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:08,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-27T03:35:08,875 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.1732678508875
2024-11-27T03:35:08,886 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 newFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875
2024-11-27T03:35:08,886 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:08,887 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:08,887 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:08,887 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:08,887 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:08,887 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875
2024-11-27T03:35:08,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45121:45121),(127.0.0.1/127.0.0.1:36875:36875)]
2024-11-27T03:35:08,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 is not closed yet, will try archiving it next time
2024-11-27T03:35:08,889 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 is not closed yet, will try archiving it next time
2024-11-27T03:35:08,890 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:35:08,890 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:35:08,891 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581 after 1ms
2024-11-27T03:35:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741838_1019 (size=1264)
2024-11-27T03:35:08,891 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:35:08,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741838_1019 (size=1264)
2024-11-27T03:35:08,892 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 is not closed yet, will try archiving it next time
2024-11-27T03:35:08,900 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732678488477/Put/vlen=218/seqid=0]
2024-11-27T03:35:08,901 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732678498183/Put/vlen=1045/seqid=0]
2024-11-27T03:35:08,901 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678487581
2024-11-27T03:35:08,901 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:08,901 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:08,901 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 after 0ms
2024-11-27T03:35:08,901 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:08,904 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732678501499/Put/vlen=1045/seqid=0]
2024-11-27T03:35:08,904 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732678503519/Put/vlen=1045/seqid=0]
2024-11-27T03:35:08,904 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500
2024-11-27T03:35:08,904 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858
2024-11-27T03:35:08,904 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858
2024-11-27T03:35:08,905 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 after 1ms
2024-11-27T03:35:08,905 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL
/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678506858 2024-11-27T03:35:08,908 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732678506857/Put/vlen=1045/seqid=0] 2024-11-27T03:35:08,908 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 2024-11-27T03:35:08,908 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 2024-11-27T03:35:08,908 WARN [IPC Server handler 0 on default port 39505 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-27T03:35:08,908 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 after 0ms 2024-11-27T03:35:09,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:09,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:09,387 WARN [ResponseProcessor for block BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
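[editor's note] The entries above show two things worth separating: RecoverLeaseFSUtils driving HDFS lease recovery on the rolled WAL files (recoverLease, then a reflective isFileClosed probe, retried until the file closes), and those probes failing with "Filesystem closed" because the mini-cluster's DFSClient was shut down while the Close-WAL-Writer threads were still retrying. A minimal sketch of the recover-then-poll pattern is below, written against the public client API. The class name LeaseRecoverySketch, the helper recoverLease, and the URI/path in main are illustrative stand-ins only; DistributedFileSystem.recoverLease(Path) and isFileClosed(Path) are the real calls named in the stack frames.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Ask the NameNode to start lease recovery, then poll until the file is
  // closed or the deadline passes. recoverLease() returns true when the file
  // is already closed; while recovery is still in progress the NameNode logs
  // "Lease recovery is in progress" (as in the WARN above), so we also poll
  // isFileClosed() before sleeping and retrying.
  static boolean recoverLease(DistributedFileSystem dfs, Path file,
      long timeoutMs, long pauseMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (dfs.recoverLease(file) || dfs.isFileClosed(file)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    // Hypothetical NameNode URI and file path, for illustration only.
    try (FileSystem fs =
        FileSystem.get(URI.create("hdfs://localhost:9000"), new Configuration())) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      System.out.println("closed: "
          + recoverLease(dfs, new Path("/tmp/example-wal"), 60_000L, 1_000L));
    }
    // After the FileSystem is closed, any further call on it fails with
    // java.io.IOException: Filesystem closed -- the root cause repeated in
    // the traces above.
  }
}

The "InvocationTargetException: null" wrappers in the traces are a side effect of how the probe is made: RecoverLeaseFSUtils invokes isFileClosed reflectively (the stack shows Method.invoke above RecoverLeaseFSUtils.isFileClosed, apparently for compatibility with older HDFS clients), and java.lang.reflect.Method.invoke wraps any exception thrown by the target method in an InvocationTargetException. The underlying failure is always the same "Filesystem closed" IOException from DFSClient.checkOpen; the retry loop itself is behaving as designed.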
2024-11-27T03:35:09,387 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1660988170_22 at /127.0.0.1:33148 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45331:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33148 dst: /127.0.0.1:45331 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45331 remote=/127.0.0.1:33148]. Total timeout mills is 60000, 59499 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:35:09,387 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1660988170_22 at /127.0.0.1:34806 [Receiving block BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44135:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34806 dst: /127.0.0.1:44135 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:35:09,387 WARN [DataStreamer for file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 block BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45331,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK], DatanodeInfoWithStorage[127.0.0.1:44135,DS-9fdc7b4b-fc5a-4563-a9bd-b4cd3770c99c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45331,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]) is bad. 2024-11-27T03:35:09,388 WARN [DataStreamer for file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 block BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-27T03:35:09,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741839_1022 (size=85) 2024-11-27T03:35:09,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:09,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:10,213 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-27T03:35:10,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:10,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:10,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 after 68088ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-27T03:35:10,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:10,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:10,875 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678501500 after 4003ms 2024-11-27T03:35:11,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:11,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:11,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:11,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:12,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:12,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:12,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:12,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:12,910 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 after 4002ms 2024-11-27T03:35:12,910 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 2024-11-27T03:35:12,920 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 2024-11-27T03:35:12,920 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7635b95fa1ac59cded4d75ff52d8a71e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-27T03:35:12,921 ERROR [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:12,921 WARN [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962 {}] wal.AbstractFSWAL(2174): append entry failed
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
2024-11-27T03:35:12,921 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C37903%2C1732678486962:(num 1732678508875) roll requested
2024-11-27T03:35:12,922 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.1732678512921
2024-11-27T03:35:12,928 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 newFile=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678512921
2024-11-27T03:35:12,928 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,928 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,928 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,928 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,928 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,928 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678512921
2024-11-27T03:35:12,928 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
2024-11-27T03:35:12,929 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-73260669-172.17.0.3-1732678484566:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
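The roll sequence above follows a consistent order: a new WAL file is created on a fresh datanode pipeline first, and only then is the old writer closed best-effort, with a failed trailer write treated as non-fatal. A minimal sketch of that order using only the stock Hadoop FileSystem API; the class name and file-naming scheme are invented for illustration, and this is not HBase's actual FSHLog implementation:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative only: rolls a log file the way the events above suggest —
 *  open the new writer first, then retire the old one best-effort. */
public class SimpleWalRoller {
  private final FileSystem fs;
  private final Path walDir;
  private FSDataOutputStream current;
  private Path currentPath;

  public SimpleWalRoller(FileSystem fs, Path walDir) {
    this.fs = fs;
    this.walDir = walDir;
  }

  /** Creates the new WAL file before closing the old one, so appends can
   *  resume immediately; a failed close of the old writer is tolerated,
   *  mirroring the "Failed to write trailer, non-fatal" message above. */
  public synchronized Path roll() throws IOException {
    Path newPath = new Path(walDir, "wal." + System.currentTimeMillis());
    FSDataOutputStream newWriter = fs.create(newPath, false);
    FSDataOutputStream old = current;
    current = newWriter;
    currentPath = newPath;
    if (old != null) {
      try {
        old.close(); // may fail if the datanode pipeline is already broken
      } catch (IOException e) {
        System.err.println("close old writer failed (non-fatal): " + e);
        // real code would now recover the lease on the old file and archive it
      }
    }
    return newPath;
  }
}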
2024-11-27T03:35:12,929 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875
2024-11-27T03:35:12,930 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 after 1ms
2024-11-27T03:35:12,932 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.1732678508875 to hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs/66d3634da959%2C37903%2C1732678486962.1732678508875
2024-11-27T03:35:12,932 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36875:36875),(127.0.0.1/127.0.0.1:45121:45121)]
2024-11-27T03:35:12,947 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/.tmp/info/eac329fd27ea43ce98d14feaf4904e29 is 1080, key is row1002/info:/1732678498183/Put/seqid=0
2024-11-27T03:35:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741841_1024 (size=9270)
2024-11-27T03:35:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741841_1024 (size=9270)
2024-11-27T03:35:12,952 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/.tmp/info/eac329fd27ea43ce98d14feaf4904e29
2024-11-27T03:35:12,958 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/.tmp/info/eac329fd27ea43ce98d14feaf4904e29 as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/info/eac329fd27ea43ce98d14feaf4904e29
2024-11-27T03:35:12,964 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/info/eac329fd27ea43ce98d14feaf4904e29, entries=4, sequenceid=8, filesize=9.1 K
2024-11-27T03:35:12,965 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 7635b95fa1ac59cded4d75ff52d8a71e in 45ms, sequenceid=8, compaction requested=false
2024-11-27T03:35:12,965 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7635b95fa1ac59cded4d75ff52d8a71e:
2024-11-27T03:35:12,965 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB
2024-11-27T03:35:12,965 ERROR [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:12,966 WARN [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d-prefix:66d3634da959,37903,1732678486962.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
2024-11-27T03:35:12,966 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C37903%2C1732678486962.meta:.meta(num 1732678487945) roll requested
2024-11-27T03:35:12,966 INFO [regionserver/66d3634da959:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C37903%2C1732678486962.meta.1732678512966.meta
2024-11-27T03:35:12,971 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,971 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,971 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,971 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,971 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:12,972 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678512966.meta
2024-11-27T03:35:12,972 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
2024-11-27T03:35:12,972 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
2024-11-27T03:35:12,972 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta
2024-11-27T03:35:12,972 WARN [IPC Server handler 3 on default port 39505 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1015
2024-11-27T03:35:12,973 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta after 1ms
2024-11-27T03:35:12,973 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45121:45121),(127.0.0.1/127.0.0.1:36875:36875)]
2024-11-27T03:35:12,973 DEBUG [regionserver/66d3634da959:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta is not closed yet, will try archiving it next time
2024-11-27T03:35:12,989 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/info/066d0b1e5e9c49bebe6e6471a73e638a is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e./info:regioninfo/1732678488481/Put/seqid=0
2024-11-27T03:35:12,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741843_1027 (size=7125)
2024-11-27T03:35:12,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741843_1027 (size=7125)
2024-11-27T03:35:12,994 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/info/066d0b1e5e9c49bebe6e6471a73e638a
2024-11-27T03:35:13,011 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/ns/a8d1ad7ce1494184ae3e5b82ee2e2092 is 43, key is default/ns:d/1732678488031/Put/seqid=0
2024-11-27T03:35:13,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741844_1028 (size=5153)
2024-11-27T03:35:13,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741844_1028 (size=5153)
2024-11-27T03:35:13,016 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/ns/a8d1ad7ce1494184ae3e5b82ee2e2092
2024-11-27T03:35:13,036 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/table/06b6cfc47ca147c09cddd28426470f2d is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732678488491/Put/seqid=0
2024-11-27T03:35:13,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741845_1029 (size=5438)
2024-11-27T03:35:13,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741845_1029 (size=5438)
2024-11-27T03:35:13,041 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/table/06b6cfc47ca147c09cddd28426470f2d
2024-11-27T03:35:13,047 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/info/066d0b1e5e9c49bebe6e6471a73e638a as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/info/066d0b1e5e9c49bebe6e6471a73e638a
2024-11-27T03:35:13,053 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/info/066d0b1e5e9c49bebe6e6471a73e638a, entries=10, sequenceid=11, filesize=7.0 K
2024-11-27T03:35:13,054 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/ns/a8d1ad7ce1494184ae3e5b82ee2e2092 as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/ns/a8d1ad7ce1494184ae3e5b82ee2e2092
2024-11-27T03:35:13,060 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/ns/a8d1ad7ce1494184ae3e5b82ee2e2092, entries=2, sequenceid=11, filesize=5.0 K
2024-11-27T03:35:13,061 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/.tmp/table/06b6cfc47ca147c09cddd28426470f2d as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/table/06b6cfc47ca147c09cddd28426470f2d
2024-11-27T03:35:13,067 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/table/06b6cfc47ca147c09cddd28426470f2d, entries=2, sequenceid=11, filesize=5.3 K
2024-11-27T03:35:13,069 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 103ms, sequenceid=11, compaction requested=false
2024-11-27T03:35:13,069 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-27T03:35:13,074 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-27T03:35:13,074 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
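The flushes above write each store file under the region's .tmp directory ("Flushed memstore ... to=.../.tmp/info/...") and then "Committing ... as ..." renames it into the column-family directory, so readers only ever observe complete files. A hedged sketch of that tmp-then-rename commit using nothing beyond the plain Hadoop FileSystem API; the helper name and layout are illustrative, not HRegionFileSystem's actual code:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative tmp-then-rename commit, as in the Committing/Added lines above. */
public final class TmpCommit {
  private TmpCommit() {}

  /** Writes bytes to familyDir/.tmp/name, then renames into familyDir/name.
   *  The rename is the "commit"; readers never see a partially written file. */
  public static Path commit(FileSystem fs, Path familyDir, String name, byte[] data)
      throws IOException {
    Path tmp = new Path(new Path(familyDir, ".tmp"), name);
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(data);
      out.hsync(); // persist the bytes before the rename makes them visible
    }
    Path dst = new Path(familyDir, name);
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
    return dst;
  }
}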
2024-11-27T03:35:13,074 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:35:13,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:35:13,074 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:35:13,074 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-27T03:35:13,074 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-27T03:35:13,075 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1343143184, stopped=false
2024-11-27T03:35:13,075 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,45307,1732678486788
2024-11-27T03:35:13,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:35:13,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:35:13,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:13,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:13,152 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:35:13,153 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:35:13,153 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:35:13,153 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:35:13,153 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:35:13,153 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:35:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,37903,1732678486962' *****
2024-11-27T03:35:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:35:13,154 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(3091): Received CLOSE for 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,37903,1732678486962
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:37903.
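The "Set watcher on znode that does not yet exist, /hbase/running" lines work because ZooKeeper's exists() call registers a watch whether or not the node is present, which is also how the earlier NodeDeleted events for /hbase/running were delivered and how a watch can be re-armed for a later re-creation. An illustrative sketch with the standard ZooKeeper client API; the class name and re-arming policy are assumptions for the example, not HBase's ZKWatcher:

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Illustrative: exists() sets a watch even on an absent znode, so deletion
 *  and re-creation of /hbase/running both fire events, as in the log above. */
public class RunningNodeWatcher implements Watcher {
  private final ZooKeeper zk;

  public RunningNodeWatcher(String quorum) throws IOException {
    this.zk = new ZooKeeper(quorum, 30_000, this);
  }

  public void watchRunning() throws KeeperException, InterruptedException {
    // Returns null if the node is absent, but the watch is set either way.
    if (zk.exists("/hbase/running", this) == null) {
      System.out.println("Set watcher on znode that does not yet exist, /hbase/running");
    }
  }

  @Override
  public void process(WatchedEvent event) {
    System.out.println("Received ZooKeeper Event, type=" + event.getType()
        + ", state=" + event.getState() + ", path=" + event.getPath());
    try {
      watchRunning(); // ZooKeeper watches are one-shot; re-arm after every event
    } catch (Exception e) {
      // best-effort during shutdown
    }
  }
}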
2024-11-27T03:35:13,154 DEBUG [RS:0;66d3634da959:37903 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:35:13,154 DEBUG [RS:0;66d3634da959:37903 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:35:13,154 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7635b95fa1ac59cded4d75ff52d8a71e, disabling compactions & flushes
2024-11-27T03:35:13,154 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:35:13,154 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-27T03:35:13,154 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e. after waiting 0 ms
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-27T03:35:13,154 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-27T03:35:13,154 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-27T03:35:13,154 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1325): Online Regions={7635b95fa1ac59cded4d75ff52d8a71e=TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e., 1588230740=hbase:meta,,1.1588230740}
2024-11-27T03:35:13,155 DEBUG [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7635b95fa1ac59cded4d75ff52d8a71e
2024-11-27T03:35:13,155 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:35:13,155 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:35:13,155 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:35:13,155 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:35:13,155 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:35:13,159 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/default/TestLogRolling-testLogRollOnPipelineRestart/7635b95fa1ac59cded4d75ff52d8a71e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1
2024-11-27T03:35:13,159 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-11-27T03:35:13,160 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:35:13,160 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7635b95fa1ac59cded4d75ff52d8a71e:
Waiting for close lock at 1732678513154
Running coprocessor pre-close hooks at 1732678513154
Disabling compacts and flushes for region at 1732678513154
Disabling writes for close at 1732678513154
Writing region close event to WAL at 1732678513155 (+1 ms)
Running coprocessor post-close hooks at 1732678513159 (+4 ms)
Closed at 1732678513159
2024-11-27T03:35:13,160 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-27T03:35:13,160 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:35:13,160 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732678488105.7635b95fa1ac59cded4d75ff52d8a71e.
2024-11-27T03:35:13,160 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
Waiting for close lock at 1732678513155
Running coprocessor pre-close hooks at 1732678513155
Disabling compacts and flushes for region at 1732678513155
Disabling writes for close at 1732678513155
Writing region close event to WAL at 1732678513156 (+1 ms)
Running coprocessor post-close hooks at 1732678513160 (+4 ms)
Closed at 1732678513160
2024-11-27T03:35:13,160 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-27T03:35:13,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:13,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:13,355 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,37903,1732678486962; all regions closed.
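Each "Failed invocation" above is one round of a recover-and-poll loop: the Close-WAL-Writer thread asks the NameNode to recover the dead writer's lease and then probes isFileClosed() until the file's last block is finalized; here every probe dies early because the test's DFSClient has already been shut down ("Filesystem closed"). A minimal sketch of that loop, assuming the real DistributedFileSystem.recoverLease/isFileClosed calls the stack trace names; the pacing and control flow are inferred from the log timestamps, not copied from RecoverLeaseFSUtils:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  // Ask the NameNode to take over the dead writer's lease, then poll until
  // the file is closed. Returns once the last block is finalized.
  static void recover(DistributedFileSystem dfs, Path wal)
      throws java.io.IOException, InterruptedException {
    boolean recovered = dfs.recoverLease(wal);   // "attempt=0" in the log
    while (!recovered) {
      Thread.sleep(1000L);                       // roughly the gap between WARNs above
      if (dfs.isFileClosed(wal)) {               // the probe that fails repeatedly here
        return;
      }
      recovered = dfs.recoverLease(wal);         // "attempt=1", "attempt=2", ...
    }
  }
}
```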
2024-11-27T03:35:13,355 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:13,355 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:13,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:13,356 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:13,356 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:13,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741842_1025 (size=825)
2024-11-27T03:35:13,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741842_1025 (size=825)
2024-11-27T03:35:13,449 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-27T03:35:13,518 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-27T03:35:13,518 INFO [regionserver/66d3634da959:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-27T03:35:13,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:13,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:14,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:14,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:14,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:14,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:15,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-27T03:35:15,231 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-27T03:35:15,232 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-11-27T03:35:15,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:15,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:15,372 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-27T03:35:15,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:15,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:16,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:16,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:16,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:16,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:16,746 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-27T03:35:16,974 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta after 4002ms
2024-11-27T03:35:16,975 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/WALs/66d3634da959,37903,1732678486962/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta to hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs/66d3634da959%2C37903%2C1732678486962.meta.1732678487945.meta
2024-11-27T03:35:16,984 DEBUG [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs
2024-11-27T03:35:16,985 INFO [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C37903%2C1732678486962.meta:.meta(num 1732678512966)
2024-11-27T03:35:16,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:16,986 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:16,986 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:16,986 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:16,986 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741840_1023 (size=1162)
2024-11-27T03:35:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741840_1023 (size=1162)
2024-11-27T03:35:16,993 DEBUG [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs
2024-11-27T03:35:16,993 INFO [RS:0;66d3634da959:37903 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C37903%2C1732678486962:(num 1732678512921)
2024-11-27T03:35:16,993 DEBUG [RS:0;66d3634da959:37903 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:35:16,993 INFO [RS:0;66d3634da959:37903 {}] regionserver.LeaseManager(133): Closed leases
2024-11-27T03:35:16,993 INFO [RS:0;66d3634da959:37903 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-27T03:35:16,994 INFO [RS:0;66d3634da959:37903 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-27T03:35:16,994 INFO [RS:0;66d3634da959:37903 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:35:16,994 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-27T03:35:16,994 INFO [RS:0;66d3634da959:37903 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37903
2024-11-27T03:35:17,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:35:17,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,37903,1732678486962
2024-11-27T03:35:17,068 INFO [RS:0;66d3634da959:37903 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-27T03:35:17,079 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,37903,1732678486962]
2024-11-27T03:35:17,089 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,37903,1732678486962 already deleted, retry=false
2024-11-27T03:35:17,089 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,37903,1732678486962 expired; onlineServers=0
2024-11-27T03:35:17,090 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,45307,1732678486788' *****
2024-11-27T03:35:17,090 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-27T03:35:17,090 INFO [M:0;66d3634da959:45307 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:35:17,090 INFO [M:0;66d3634da959:45307 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-27T03:35:17,090 DEBUG [M:0;66d3634da959:45307 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-27T03:35:17,090 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
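The three ZooKeeper lines above show how the shutdown propagates: the region server's ephemeral znode under /hbase/rs disappears, the master's watcher fires a NodeChildrenChanged event, and RegionServerTracker treats the vanished node as a server expiration. A minimal sketch of that mechanism using the plain ZooKeeper client API; the znode path comes from the log, while the class itself is illustrative rather than the actual RegionServerTracker:

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

final class RsTrackerSketch implements Watcher {
  private final ZooKeeper zk;

  RsTrackerSketch(ZooKeeper zk) { this.zk = zk; }

  // Lists live servers and re-arms the one-shot watch on /hbase/rs.
  List<String> liveServers() throws Exception {
    return zk.getChildren("/hbase/rs", this);
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeChildrenChanged) {
      // An ephemeral child appeared or vanished; diffing against the previous
      // list identifies expired servers, which the master then processes.
    }
  }
}
```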
2024-11-27T03:35:17,091 DEBUG [M:0;66d3634da959:45307 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-27T03:35:17,091 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678487311 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678487311,5,FailOnTimeoutGroup]
2024-11-27T03:35:17,091 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678487312 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678487312,5,FailOnTimeoutGroup]
2024-11-27T03:35:17,092 INFO [M:0;66d3634da959:45307 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-27T03:35:17,092 INFO [M:0;66d3634da959:45307 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:35:17,092 DEBUG [M:0;66d3634da959:45307 {}] master.HMaster(1795): Stopping service threads
2024-11-27T03:35:17,092 INFO [M:0;66d3634da959:45307 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-27T03:35:17,093 INFO [M:0;66d3634da959:45307 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:35:17,093 INFO [M:0;66d3634da959:45307 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-27T03:35:17,093 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-27T03:35:17,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-27T03:35:17,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:17,100 DEBUG [M:0;66d3634da959:45307 {}] zookeeper.ZKUtil(347): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-27T03:35:17,100 WARN [M:0;66d3634da959:45307 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-27T03:35:17,100 INFO [M:0;66d3634da959:45307 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/.lastflushedseqids
2024-11-27T03:35:17,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741846_1030 (size=111)
2024-11-27T03:35:17,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741846_1030 (size=111)
2024-11-27T03:35:17,108 INFO [M:0;66d3634da959:45307 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-27T03:35:17,108 INFO [M:0;66d3634da959:45307 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-27T03:35:17,109 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:35:17,109 INFO [M:0;66d3634da959:45307 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:17,109 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:17,109 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:35:17,109 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:17,109 INFO [M:0;66d3634da959:45307 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB
2024-11-27T03:35:17,109 ERROR [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData-prefix:66d3634da959,45307,1732678486788 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:17,110 WARN [FSHLog-0-hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData-prefix:66d3634da959,45307,1732678486788 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:17,110 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 66d3634da959%2C45307%2C1732678486788:(num 1732678487089) roll requested
2024-11-27T03:35:17,110 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C45307%2C1732678486788.1732678517110
2024-11-27T03:35:17,116 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,116 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,116 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,117 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,117 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,117 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678517110
2024-11-27T03:35:17,117 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-27T03:35:17,117 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46077,DS-3e3b894e-5647-4471-a448-9f351c76c63d,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
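The trailer-write failure above is tolerated by design: the pipeline to the old WAL file is already dead, so the close path falls back to NameNode-side lease recovery (the "Recover lease on dfs file" line that follows). A hedged sketch of that fallback shape; DistributedFileSystem.recoverLease is the real HDFS call, while the wrapper class and its control flow are assumptions drawn from the log order rather than the exact AbstractFSWAL code:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class CloseOldWriterSketch {
  // Close the old writer if the pipeline still works; otherwise hand the file
  // to the NameNode so lease recovery can finalize its last block.
  static void closeOldWriter(AutoCloseable oldWriter, DistributedFileSystem dfs,
      Path oldWalFile) throws IOException {
    try {
      oldWriter.close();  // writes the trailer through the existing pipeline
    } catch (Exception e) {
      // "close old writer failed." -- non-fatal; start lease recovery instead.
      dfs.recoverLease(oldWalFile);  // "attempt=0"; later attempts are retried
                                     // asynchronously (Close-WAL-Writer-0 above)
    }
  }
}
```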
2024-11-27T03:35:17,117 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089
2024-11-27T03:35:17,118 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45121:45121),(127.0.0.1/127.0.0.1:36875:36875)]
2024-11-27T03:35:17,118 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 is not closed yet, will try archiving it next time
2024-11-27T03:35:17,118 WARN [IPC Server handler 4 on default port 39505 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013
2024-11-27T03:35:17,118 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 after 1ms
2024-11-27T03:35:17,131 DEBUG [M:0;66d3634da959:45307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/599d60946f914a7cb3276f983565a072 is 82, key is hbase:meta,,1/info:regioninfo/1732678487977/Put/seqid=0
2024-11-27T03:35:17,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741848_1033 (size=5672)
2024-11-27T03:35:17,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741848_1033 (size=5672)
2024-11-27T03:35:17,136 INFO [M:0;66d3634da959:45307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/599d60946f914a7cb3276f983565a072
2024-11-27T03:35:17,155 DEBUG [M:0;66d3634da959:45307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc58143a29b848e4b71ab946cb8b8546 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732678488496/Put/seqid=0
2024-11-27T03:35:17,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741849_1034 (size=6117)
2024-11-27T03:35:17,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741849_1034 (size=6117)
2024-11-27T03:35:17,160 INFO [M:0;66d3634da959:45307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc58143a29b848e4b71ab946cb8b8546
2024-11-27T03:35:17,178 DEBUG [M:0;66d3634da959:45307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac562d6885624a7383c799391b84815b is 69, key is 66d3634da959,37903,1732678486962/rs:state/1732678487429/Put/seqid=0
2024-11-27T03:35:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:35:17,179 INFO [RS:0;66d3634da959:37903 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-27T03:35:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37903-0x1017a5b869d0001, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:35:17,179 INFO [RS:0;66d3634da959:37903 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,37903,1732678486962; zookeeper connection closed.
2024-11-27T03:35:17,179 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c7e5ac9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c7e5ac9
2024-11-27T03:35:17,180 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-27T03:35:17,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741850_1035 (size=5156)
2024-11-27T03:35:17,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741850_1035 (size=5156)
2024-11-27T03:35:17,183 INFO [M:0;66d3634da959:45307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac562d6885624a7383c799391b84815b
2024-11-27T03:35:17,201 DEBUG [M:0;66d3634da959:45307 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9476070226e6437a87931f5c8ca78703 is 52, key is load_balancer_on/state:d/1732678488099/Put/seqid=0
2024-11-27T03:35:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741851_1036 (size=5056)
2024-11-27T03:35:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741851_1036 (size=5056)
2024-11-27T03:35:17,206 INFO [M:0;66d3634da959:45307 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9476070226e6437a87931f5c8ca78703
2024-11-27T03:35:17,211 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/599d60946f914a7cb3276f983565a072 as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/599d60946f914a7cb3276f983565a072
2024-11-27T03:35:17,216 INFO [M:0;66d3634da959:45307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/599d60946f914a7cb3276f983565a072, entries=8, sequenceid=56, filesize=5.5 K
2024-11-27T03:35:17,217 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/dc58143a29b848e4b71ab946cb8b8546 as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc58143a29b848e4b71ab946cb8b8546
2024-11-27T03:35:17,222 INFO [M:0;66d3634da959:45307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/dc58143a29b848e4b71ab946cb8b8546, entries=6, sequenceid=56, filesize=6.0 K
2024-11-27T03:35:17,223 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ac562d6885624a7383c799391b84815b as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac562d6885624a7383c799391b84815b
2024-11-27T03:35:17,229 INFO [M:0;66d3634da959:45307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ac562d6885624a7383c799391b84815b, entries=1, sequenceid=56, filesize=5.0 K
2024-11-27T03:35:17,230 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9476070226e6437a87931f5c8ca78703 as hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9476070226e6437a87931f5c8ca78703
2024-11-27T03:35:17,235 INFO [M:0;66d3634da959:45307 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9476070226e6437a87931f5c8ca78703, entries=1, sequenceid=56, filesize=4.9 K
2024-11-27T03:35:17,236 INFO [M:0;66d3634da959:45307 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false
2024-11-27T03:35:17,238 INFO [M:0;66d3634da959:45307 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:17,238 DEBUG [M:0;66d3634da959:45307 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1732678517109
    Disabling compacts and flushes for region at 1732678517109
    Disabling writes for close at 1732678517109
    Obtaining lock to block concurrent updates at 1732678517109
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678517109
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1732678517109
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678517118 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678517118
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678517131 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678517131
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678517141 (+10 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678517155 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678517155
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678517165 (+10 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678517177 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678517177
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678517187 (+10 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678517200 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678517200
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16ad3431: reopening flushed file at 1732678517210 (+10 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@845bdf4: reopening flushed file at 1732678517216 (+6 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@753b6d30: reopening flushed file at 1732678517222 (+6 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5101bd3a: reopening flushed file at 1732678517229 (+7 ms)
    Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false at 1732678517237 (+8 ms)
    Writing region close event to WAL at 1732678517238 (+1 ms)
    Closed at 1732678517238
2024-11-27T03:35:17,238 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,238 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,239 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,239 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,239 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741847_1031 (size=757)
2024-11-27T03:35:17,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45331 is added to blk_1073741847_1031 (size=757)
2024-11-27T03:35:17,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:17,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:17,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:17,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:18,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,180 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:18,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:18,371 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-11-27T03:35:18,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:18,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:18,692 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-27T03:35:18,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,693 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,694 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:18,718 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:35:19,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:19,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:19,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:19,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:20,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:20,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:20,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:20,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:21,119 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 after 4002ms
2024-11-27T03:35:21,121 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/WALs/66d3634da959,45307,1732678486788/66d3634da959%2C45307%2C1732678486788.1732678487089 to hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/oldWALs/66d3634da959%2C45307%2C1732678486788.1732678487089
2024-11-27T03:35:21,129 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/MasterData/oldWALs/66d3634da959%2C45307%2C1732678486788.1732678487089 to hdfs://localhost:39505/user/jenkins/test-data/a443bfc6-2662-4ee9-1490-a31d78c5b68d/oldWALs/66d3634da959%2C45307%2C1732678486788.1732678487089$masterlocalwal$
2024-11-27T03:35:21,129 INFO [M:0;66d3634da959:45307 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-27T03:35:21,129 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-27T03:35:21,130 INFO [M:0;66d3634da959:45307 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45307
2024-11-27T03:35:21,130 INFO [M:0;66d3634da959:45307 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-27T03:35:21,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:35:21,284 INFO [M:0;66d3634da959:45307 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-27T03:35:21,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45307-0x1017a5b869d0000, quorum=127.0.0.1:58527, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:35:21,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5e666703{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:21,291 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2dd41fe4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:35:21,291 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:35:21,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66269315{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:35:21,291 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7202b0aa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED}
2024-11-27T03:35:21,294 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T03:35:21,294 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:35:21,294 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:35:21,294 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-73260669-172.17.0.3-1732678484566 (Datanode Uuid f85354ed-74a4-4d2e-98af-7ef9ac6645a0) service to localhost/127.0.0.1:39505
2024-11-27T03:35:21,295 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data3/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:21,295 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data4/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:21,295 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:35:21,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@647c40ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:21,298 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38ca15c7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:35:21,298 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:35:21,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a15b38f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:35:21,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58c81822{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED}
2024-11-27T03:35:21,299 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T03:35:21,299 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:35:21,299 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:35:21,299 WARN [BP-73260669-172.17.0.3-1732678484566 heartbeating to localhost/127.0.0.1:39505 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-73260669-172.17.0.3-1732678484566 (Datanode Uuid 6e5d0235-d396-4191-ae35-0410e7e037cc) service to localhost/127.0.0.1:39505
2024-11-27T03:35:21,300 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data1/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:21,300 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/cluster_1c380653-2795-626b-2075-a71d1c6a40fd/data/data2/current/BP-73260669-172.17.0.3-1732678484566 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:35:21,300 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:35:21,305 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@771a779f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T03:35:21,305 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23fcbb95{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:35:21,306 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:35:21,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@624b3986{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:35:21,306 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42369482{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir/,STOPPED}
2024-11-27T03:35:21,311 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-27T03:35:21,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:21,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:21,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-27T03:35:21,337 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=183 (was 155)
Potentially hanging thread: HMaster-EventLoopGroup-10-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39505
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39505
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39505
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39505 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39505
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39505 from jenkins.hfs.4
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:39505
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39505 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-33-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=455 (was 431) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=77 (was 46) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7851 (was 7995)
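The "Thread=183 (was 155)" diff above is before/after resource accounting. A stripped-down sketch of that accounting pattern follows; HBase's ResourceChecker also tracks file descriptors, load average, process count, and memory, and the class here is illustrative, not its implementation:

```java
import java.util.HashSet;
import java.util.Set;

/** Illustrative before/after thread accounting, as in the summary above. */
public final class ThreadLeakChecker {
  private Set<Thread> before;

  public void beforeTest() {
    // Snapshot of all live JVM threads before the test runs.
    before = new HashSet<>(Thread.getAllStackTraces().keySet());
  }

  public void afterTest() {
    Set<Thread> after = new HashSet<>(Thread.getAllStackTraces().keySet());
    System.out.println("Thread=" + after.size() + " (was " + before.size() + ")");
    after.removeAll(before);
    for (Thread t : after) {
      // Threads that survive the test are the "Potentially hanging thread"
      // entries in the log, printed with their current stack.
      System.out.println("Potentially hanging thread: " + t.getName());
      for (StackTraceElement frame : t.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }
}
```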
2024-11-27T03:35:21,343 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=183, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=77, ProcessCount=11, AvailableMemoryMB=7851
2024-11-27T03:35:21,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-27T03:35:21,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.log.dir so I do NOT create it in target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc
2024-11-27T03:35:21,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1c230238-959d-0e07-cdf3-5a4c197fcb4d/hadoop.tmp.dir so I do NOT create it in target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc
2024-11-27T03:35:21,343 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d, deleteOnExit=true
2024-11-27T03:35:21,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/test.cache.data in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.tmp.dir in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-27T03:35:21,344 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-27T03:35:21,344 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/nfs.dump.dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/java.io.tmpdir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-27T03:35:21,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-27T03:35:21,358 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
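Each "Setting X to ... in system properties and HBase conf" record above points a path-valued key at the per-test data directory, so the minicluster writes nothing outside target/test-data. As a minimal sketch under that assumption (the helper name and key list are illustrative, not the HBaseTestingUtil API):

```java
import org.apache.hadoop.conf.Configuration;

/** Illustrative redirection of path-valued keys into a per-test directory. */
public final class TestDirSetup {
  private static final String[] PATH_KEYS = {
      "test.cache.data", "hadoop.tmp.dir", "mapreduce.cluster.local.dir",
      "dfs.journalnode.edits.dir", "nfs.dump.dir", "java.io.tmpdir"
  };

  public static void redirectToTestDir(Configuration conf, String testDataDir) {
    for (String key : PATH_KEYS) {
      String value = testDataDir + "/" + key;
      // Set in both places: some components read system properties,
      // others read the Hadoop/HBase Configuration.
      System.setProperty(key, value);
      conf.set(key, value);
      System.out.println("Setting " + key + " to " + value
          + " in system properties and HBase conf");
    }
  }

  private TestDirSetup() {
  }
}
```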
2024-11-27T03:35:21,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:21,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:21,710 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:35:21,714 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:35:21,715 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:35:21,715 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:35:21,715 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:35:21,716 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:35:21,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1106c0e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:35:21,719 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ea88c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:35:21,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ec454b0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/java.io.tmpdir/jetty-localhost-42525-hadoop-hdfs-3_4_1-tests_jar-_-any-531362310680428979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T03:35:21,812 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fdc15a6{HTTP/1.1, (http/1.1)}{localhost:42525}
2024-11-27T03:35:21,812 INFO [Time-limited test {}] server.Server(415): Started @202856ms
2024-11-27T03:35:21,823 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-27T03:35:22,083 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:35:22,086 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:35:22,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:35:22,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:35:22,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:35:22,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b135886{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:35:22,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc731d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:35:22,179 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74d20704{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/java.io.tmpdir/jetty-localhost-37479-hadoop-hdfs-3_4_1-tests_jar-_-any-6666564320964377376/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:22,180 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54e83224{HTTP/1.1, (http/1.1)}{localhost:37479}
2024-11-27T03:35:22,180 INFO [Time-limited test {}] server.Server(415): Started @203224ms
2024-11-27T03:35:22,181 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:35:22,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-27T03:35:22,207 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-27T03:35:22,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-27T03:35:22,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-27T03:35:22,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-27T03:35:22,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74e6f5d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir/,AVAILABLE}
2024-11-27T03:35:22,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@317cfbff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-27T03:35:22,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@65c292b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/java.io.tmpdir/jetty-localhost-42183-hadoop-hdfs-3_4_1-tests_jar-_-any-2708897329975639169/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:35:22,300 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6719db8a{HTTP/1.1, (http/1.1)}{localhost:42183}
2024-11-27T03:35:22,300 INFO [Time-limited test {}] server.Server(415): Started @203344ms
2024-11-27T03:35:22,301 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-27T03:35:22,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:22,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
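The repeated traces read "InvocationTargetException: null ... Caused by: IOException: Filesystem closed" because, as the Method.invoke frame shows, isFileClosed is called reflectively, so the IOException from the already-closed DFSClient surfaces wrapped and the real failure is only in getCause(). A self-contained demonstration of that wrapping (FakeFs is a stand-in, not an HDFS class):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/** Demonstrates how a reflective call wraps the real IOException. */
public final class ReflectiveCallDemo {
  public static class FakeFs {
    public boolean isFileClosed(String path) throws IOException {
      // Stands in for DFSClient.checkOpen failing after fs.close().
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = FakeFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new FakeFs(), "/some/wal");
    } catch (InvocationTargetException e) {
      // e.getMessage() is null, matching "InvocationTargetException: null";
      // the IOException is carried as the nested cause.
      System.out.println("wrapper message: " + e.getMessage());
      System.out.println("real cause: " + e.getCause());
    }
  }
}
```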
2024-11-27T03:35:22,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:22,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:23,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:23,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:23,370 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data1/current/BP-1891682051-172.17.0.3-1732678521368/current, will proceed with Du for space computation calculation,
2024-11-27T03:35:23,370 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data2/current/BP-1891682051-172.17.0.3-1732678521368/current, will proceed with Du for space computation calculation,
2024-11-27T03:35:23,387 WARN [Thread-1625 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-27T03:35:23,389 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1896c042716cec8 with lease ID 0xb5be40f9cc977768: Processing first storage report for DS-b84a90db-d8a2-4a67-b642-55fc5fe5fc1d from datanode DatanodeRegistration(127.0.0.1:34045, datanodeUuid=ac2b6ff5-d558-442a-82bb-5f4f0014dcc3, infoPort=45029, infoSecurePort=0, ipcPort=39067, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368) 2024-11-27T03:35:23,389 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1896c042716cec8 with lease ID 0xb5be40f9cc977768: from storage DS-b84a90db-d8a2-4a67-b642-55fc5fe5fc1d node DatanodeRegistration(127.0.0.1:34045, datanodeUuid=ac2b6ff5-d558-442a-82bb-5f4f0014dcc3, infoPort=45029, infoSecurePort=0, ipcPort=39067, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:23,389 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb1896c042716cec8 with lease ID 0xb5be40f9cc977768: Processing first storage report for DS-c3e8febb-84bd-4be2-ae8a-bac2b5ac550b from datanode DatanodeRegistration(127.0.0.1:34045, datanodeUuid=ac2b6ff5-d558-442a-82bb-5f4f0014dcc3, infoPort=45029, infoSecurePort=0, ipcPort=39067, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368) 2024-11-27T03:35:23,389 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb1896c042716cec8 with lease ID 0xb5be40f9cc977768: from storage DS-c3e8febb-84bd-4be2-ae8a-bac2b5ac550b node DatanodeRegistration(127.0.0.1:34045, datanodeUuid=ac2b6ff5-d558-442a-82bb-5f4f0014dcc3, infoPort=45029, infoSecurePort=0, ipcPort=39067, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:23,496 WARN [Thread-1672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data3/current/BP-1891682051-172.17.0.3-1732678521368/current, will proceed with Du for space computation calculation, 2024-11-27T03:35:23,496 WARN [Thread-1673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data4/current/BP-1891682051-172.17.0.3-1732678521368/current, will proceed with Du for space computation calculation, 2024-11-27T03:35:23,516 WARN [Thread-1648 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:35:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9e2107abfa61b53 with lease ID 0xb5be40f9cc977769: Processing first storage report for DS-a9a37ad2-2a77-48e8-bf97-97ecefcb427b from datanode DatanodeRegistration(127.0.0.1:37469, datanodeUuid=c0e39c88-aa42-4f3c-ad95-b0ce9c5ef496, infoPort=42537, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368) 2024-11-27T03:35:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9e2107abfa61b53 with lease ID 0xb5be40f9cc977769: from storage DS-a9a37ad2-2a77-48e8-bf97-97ecefcb427b node DatanodeRegistration(127.0.0.1:37469, datanodeUuid=c0e39c88-aa42-4f3c-ad95-b0ce9c5ef496, infoPort=42537, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9e2107abfa61b53 with lease ID 0xb5be40f9cc977769: Processing first storage report for DS-1540c490-8e4b-4cb1-bb28-5956ea92df9a from datanode DatanodeRegistration(127.0.0.1:37469, datanodeUuid=c0e39c88-aa42-4f3c-ad95-b0ce9c5ef496, infoPort=42537, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368) 2024-11-27T03:35:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9e2107abfa61b53 with lease ID 0xb5be40f9cc977769: from storage DS-1540c490-8e4b-4cb1-bb28-5956ea92df9a node DatanodeRegistration(127.0.0.1:37469, datanodeUuid=c0e39c88-aa42-4f3c-ad95-b0ce9c5ef496, infoPort=42537, infoSecurePort=0, ipcPort=45445, storageInfo=lv=-57;cid=testClusterID;nsid=1888117206;c=1732678521368), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:35:23,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:23,533 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc 2024-11-27T03:35:23,535 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/zookeeper_0, clientPort=55892, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-27T03:35:23,535 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55892 2024-11-27T03:35:23,536 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:35:23,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:35:23,549 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147 with version=8 2024-11-27T03:35:23,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging 2024-11-27T03:35:23,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:23,551 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-27T03:35:23,551 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:35:23,552 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36843 2024-11-27T03:35:23,554 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36843 connecting to ZooKeeper ensemble=127.0.0.1:55892 2024-11-27T03:35:23,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:368430x0, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:35:23,611 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36843-0x1017a5c163a0000 connected 2024-11-27T03:35:23,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,732 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:35:23,732 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147, hbase.cluster.distributed=false 2024-11-27T03:35:23,735 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:35:23,735 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36843 2024-11-27T03:35:23,736 
DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36843 2024-11-27T03:35:23,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36843 2024-11-27T03:35:23,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36843 2024-11-27T03:35:23,736 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36843 2024-11-27T03:35:23,752 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:35:23,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:35:23,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:35:23,752 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:35:23,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:35:23,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:35:23,753 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-27T03:35:23,753 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:35:23,753 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42215 2024-11-27T03:35:23,755 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42215 connecting to ZooKeeper ensemble=127.0.0.1:55892 2024-11-27T03:35:23,755 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,757 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:422150x0, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:35:23,768 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:422150x0, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:35:23,768 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42215-0x1017a5c163a0001 
connected 2024-11-27T03:35:23,768 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-27T03:35:23,769 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-27T03:35:23,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-27T03:35:23,771 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:35:23,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42215 2024-11-27T03:35:23,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42215 2024-11-27T03:35:23,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42215 2024-11-27T03:35:23,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42215 2024-11-27T03:35:23,773 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42215 2024-11-27T03:35:23,787 DEBUG [M:0;66d3634da959:36843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:36843 2024-11-27T03:35:23,787 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,36843,1732678523551 2024-11-27T03:35:23,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:35:23,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:35:23,800 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,36843,1732678523551 2024-11-27T03:35:23,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-27T03:35:23,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:35:23,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:35:23,810 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-27T03:35:23,811 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,36843,1732678523551 from backup master directory 2024-11-27T03:35:23,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,36843,1732678523551 2024-11-27T03:35:23,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:35:23,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-27T03:35:23,820 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-27T03:35:23,821 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,36843,1732678523551 2024-11-27T03:35:23,828 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/hbase.id] with ID: 35364198-fbe3-4b32-b92c-4866c88e63d1 2024-11-27T03:35:23,828 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/.tmp/hbase.id 2024-11-27T03:35:23,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:35:23,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741826_1002 (size=42) 2024-11-27T03:35:23,838 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/.tmp/hbase.id]:[hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/hbase.id] 2024-11-27T03:35:23,857 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:35:23,858 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-27T03:35:23,860 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-27T03:35:23,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:23,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:23,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:35:23,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:35:23,886 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:35:23,887 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-27T03:35:23,887 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:35:23,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:35:23,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:35:23,896 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store
2024-11-27T03:35:23,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:35:23,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:35:23,903 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:35:23,903 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:35:23,903 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:23,903 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:23,903 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:35:23,903 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:23,903 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:35:23,903 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678523903Disabling compacts and flushes for region at 1732678523903Disabling writes for close at 1732678523903Writing region close event to WAL at 1732678523903Closed at 1732678523903
2024-11-27T03:35:23,904 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/.initializing
2024-11-27T03:35:23,904 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/WALs/66d3634da959,36843,1732678523551
2024-11-27T03:35:23,906 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C36843%2C1732678523551, suffix=, logDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/WALs/66d3634da959,36843,1732678523551, archiveDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/oldWALs, maxLogs=10
2024-11-27T03:35:23,906 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C36843%2C1732678523551.1732678523906
2024-11-27T03:35:23,911 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/WALs/66d3634da959,36843,1732678523551/66d3634da959%2C36843%2C1732678523551.1732678523906
2024-11-27T03:35:23,912 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42537:42537),(127.0.0.1/127.0.0.1:45029:45029)]
2024-11-27T03:35:23,913 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:35:23,913 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:35:23,913 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,913 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-27T03:35:23,916 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:23,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:23,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-27T03:35:23,918 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:23,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:35:23,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:35:23,920 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:23,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:35:23,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:35:23,922 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:23,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:35:23,922 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,923 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,924 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,926 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,926 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,927 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:35:23,928 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:35:23,930 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:35:23,931 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734316, jitterRate=-0.06626985967159271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:35:23,932 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678523913Initializing all the Stores at 1732678523914 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678523914Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678523914Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678523914Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678523914Cleaning up temporary data from old regions at 1732678523926 (+12 ms)Region opened successfully at 1732678523932 (+6 ms)
2024-11-27T03:35:23,932 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-27T03:35:23,935 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6491aabb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:35:23,936 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:35:23,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:35:23,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:35:23,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:35:23,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-27T03:35:23,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-27T03:35:23,938 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:35:23,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:35:23,940 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:35:23,952 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:35:23,952 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:35:23,953 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:35:23,962 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:35:23,963 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:35:23,964 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:35:23,973 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:35:23,974 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:35:23,983 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:35:23,988 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:35:23,999 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:35:24,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:35:24,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:35:24,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,012 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,36843,1732678523551, sessionid=0x1017a5c163a0000, setting cluster-up flag (Was=false)
2024-11-27T03:35:24,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,067 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:35:24,069 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,36843,1732678523551
2024-11-27T03:35:24,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,120 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:35:24,121 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,36843,1732678523551
2024-11-27T03:35:24,123 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-27T03:35:24,125 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-27T03:35:24,125 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-27T03:35:24,125 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-27T03:35:24,125 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,36843,1732678523551 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:35:24,127 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678554128
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-27T03:35:24,128 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,129 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:35:24,129 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-27T03:35:24,130 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,130 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true',
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-27T03:35:24,133 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-27T03:35:24,133 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-27T03:35:24,133 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-27T03:35:24,134 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-27T03:35:24,134 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-27T03:35:24,134 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678524134,5,FailOnTimeoutGroup] 2024-11-27T03:35:24,135 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678524134,5,FailOnTimeoutGroup] 2024-11-27T03:35:24,135 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-27T03:35:24,135 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-27T03:35:24,135 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-27T03:35:24,135 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-27T03:35:24,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:35:24,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:35:24,138 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-27T03:35:24,139 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147
2024-11-27T03:35:24,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:35:24,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:35:24,146 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:35:24,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:35:24,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:35:24,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,149 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:35:24,150 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:35:24,150 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,151 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:35:24,152 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:35:24,152 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,152 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:35:24,154 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:35:24,154 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,154 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,154 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:35:24,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740
2024-11-27T03:35:24,155 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740
2024-11-27T03:35:24,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:35:24,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:35:24,157 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:35:24,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:35:24,160 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:35:24,160 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706312, jitterRate=-0.10187821090221405}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:35:24,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678524146Initializing all the Stores at 1732678524147 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678524147Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678524147Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678524148 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678524148Cleaning up temporary data from old regions at 1732678524157 (+9 ms)Region opened successfully at 1732678524160 (+3 ms)
2024-11-27T03:35:24,160 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:35:24,160 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:35:24,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:35:24,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:35:24,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:35:24,161 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:35:24,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678524160Disabling compacts and flushes for region at 1732678524160Disabling writes for close at 1732678524161 (+1 ms)Writing region close event to WAL at 1732678524161Closed at 1732678524161
2024-11-27T03:35:24,162 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:35:24,162 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-27T03:35:24,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-27T03:35:24,163 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:35:24,164 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-27T03:35:24,175 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(746): ClusterId : 35364198-fbe3-4b32-b92c-4866c88e63d1
2024-11-27T03:35:24,175 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-27T03:35:24,184 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-27T03:35:24,184 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-27T03:35:24,195 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-27T03:35:24,195 DEBUG [RS:0;66d3634da959:42215 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f9cc52d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:35:24,205 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:42215
2024-11-27T03:35:24,205 INFO [RS:0;66d3634da959:42215 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-27T03:35:24,205 INFO [RS:0;66d3634da959:42215 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-27T03:35:24,205 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-27T03:35:24,206 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,36843,1732678523551 with port=42215, startcode=1732678523752
2024-11-27T03:35:24,206 DEBUG [RS:0;66d3634da959:42215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-27T03:35:24,208 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39903, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService
2024-11-27T03:35:24,209 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,42215,1732678523752
2024-11-27T03:35:24,209 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36843 {}] master.ServerManager(517): Registering regionserver=66d3634da959,42215,1732678523752
2024-11-27T03:35:24,210 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147
2024-11-27T03:35:24,210 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43791
2024-11-27T03:35:24,210 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-27T03:35:24,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:35:24,221 DEBUG [RS:0;66d3634da959:42215 {}] zookeeper.ZKUtil(111): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,42215,1732678523752
2024-11-27T03:35:24,221 WARN [RS:0;66d3634da959:42215 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:35:24,221 INFO [RS:0;66d3634da959:42215 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:35:24,221 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752
2024-11-27T03:35:24,221 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,42215,1732678523752]
2024-11-27T03:35:24,225 INFO [RS:0;66d3634da959:42215 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-27T03:35:24,226 INFO [RS:0;66d3634da959:42215 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-27T03:35:24,226 INFO [RS:0;66d3634da959:42215 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:35:24,226 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,226 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-27T03:35:24,227 INFO [RS:0;66d3634da959:42215 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-27T03:35:24,227 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,227 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:35:24,228 DEBUG [RS:0;66d3634da959:42215 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3
2024-11-27T03:35:24,229 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,229 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,229 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,229 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,229 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,229 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,42215,1732678523752-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:35:24,244 INFO [RS:0;66d3634da959:42215 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-27T03:35:24,244 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,42215,1732678523752-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,244 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,244 INFO [RS:0;66d3634da959:42215 {}] regionserver.Replication(171): 66d3634da959,42215,1732678523752 started
2024-11-27T03:35:24,258 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,258 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,42215,1732678523752, RpcServer on 66d3634da959/172.17.0.3:42215, sessionid=0x1017a5c163a0001
2024-11-27T03:35:24,258 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-27T03:35:24,258 DEBUG [RS:0;66d3634da959:42215 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,42215,1732678523752
2024-11-27T03:35:24,258 DEBUG [RS:0;66d3634da959:42215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,42215,1732678523752'
2024-11-27T03:35:24,258 DEBUG [RS:0;66d3634da959:42215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,42215,1732678523752
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,42215,1732678523752'
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-27T03:35:24,259 DEBUG [RS:0;66d3634da959:42215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-27T03:35:24,260 DEBUG [RS:0;66d3634da959:42215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-27T03:35:24,260 INFO [RS:0;66d3634da959:42215 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-27T03:35:24,260 INFO [RS:0;66d3634da959:42215 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-27T03:35:24,315 WARN [66d3634da959:36843 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-11-27T03:35:24,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:24,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:24,362 INFO [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C42215%2C1732678523752, suffix=, logDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752, archiveDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/oldWALs, maxLogs=32
2024-11-27T03:35:24,363 INFO [RS:0;66d3634da959:42215 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C42215%2C1732678523752.1732678524363
2024-11-27T03:35:24,368 INFO [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678524363
2024-11-27T03:35:24,377 DEBUG [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42537:42537),(127.0.0.1/127.0.0.1:45029:45029)]
2024-11-27T03:35:24,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:24,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:24,565 DEBUG [66d3634da959:36843 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-27T03:35:24,566 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,42215,1732678523752
2024-11-27T03:35:24,569 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,42215,1732678523752, state=OPENING
2024-11-27T03:35:24,615 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-27T03:35:24,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:35:24,627 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-27T03:35:24,627 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:35:24,627 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:35:24,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,42215,1732678523752}]
2024-11-27T03:35:24,783 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-27T03:35:24,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42329, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-27T03:35:24,791 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-27T03:35:24,792 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:35:24,794 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C42215%2C1732678523752.meta, suffix=.meta, logDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752, archiveDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/oldWALs, maxLogs=32
2024-11-27T03:35:24,795 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C42215%2C1732678523752.meta.1732678524795.meta
2024-11-27T03:35:24,801 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.meta.1732678524795.meta
2024-11-27T03:35:24,802 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42537:42537),(127.0.0.1/127.0.0.1:45029:45029)]
2024-11-27T03:35:24,804 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:35:24,804 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-11-27T03:35:24,804 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-27T03:35:24,804 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-27T03:35:24,805 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-27T03:35:24,805 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:35:24,805 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-27T03:35:24,805 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-27T03:35:24,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:35:24,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:35:24,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:35:24,810 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:35:24,810 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:35:24,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:35:24,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:35:24,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:35:24,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:35:24,813 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:35:24,814 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740
2024-11-27T03:35:24,815 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740
2024-11-27T03:35:24,816 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:35:24,816 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:35:24,817 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-27T03:35:24,818 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-27T03:35:24,819 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757856, jitterRate=-0.03633716702461243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-27T03:35:24,819 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-27T03:35:24,820 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740:
    Running coprocessor pre-open hook at 1732678524805
    Writing region info on filesystem at 1732678524805
    Initializing all the Stores at 1732678524806 (+1 ms)
    Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678524806
    Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678524807 (+1 ms)
    Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678524807
    Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678524807
    Cleaning up temporary data from old regions at 1732678524816 (+9 ms)
    Running coprocessor post-open hooks at 1732678524819 (+3 ms)
    Region opened successfully at 1732678524820 (+1 ms)
2024-11-27T03:35:24,821 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678524783
2024-11-27T03:35:24,823 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-27T03:35:24,823 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-27T03:35:24,824 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,42215,1732678523752
2024-11-27T03:35:24,825 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,42215,1732678523752, state=OPEN
2024-11-27T03:35:24,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:35:24,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-27T03:35:24,871 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,42215,1732678523752
2024-11-27T03:35:24,871 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:35:24,871 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-27T03:35:24,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-27T03:35:24,878 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,42215,1732678523752 in 244 msec
2024-11-27T03:35:24,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-27T03:35:24,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 716 msec
2024-11-27T03:35:24,886 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:35:24,886 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-27T03:35:24,888 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:35:24,888 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,42215,1732678523752, seqNum=-1]
2024-11-27T03:35:24,888 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:35:24,890 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41873, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:35:24,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 771 msec
2024-11-27T03:35:24,896 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678524896, completionTime=-1
2024-11-27T03:35:24,896 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-27T03:35:24,896 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678584898
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678644898
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,36843,1732678523551-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,36843,1732678523551-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,36843,1732678523551-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:36843, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,898 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,899 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,900 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.081sec
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,36843,1732678523551-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-27T03:35:24,902 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,36843,1732678523551-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-27T03:35:24,904 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-27T03:35:24,904 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-27T03:35:24,904 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,36843,1732678523551-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-27T03:35:24,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6800c4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:35:24,976 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,36843,-1 for getting cluster id
2024-11-27T03:35:24,976 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-27T03:35:24,978 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '35364198-fbe3-4b32-b92c-4866c88e63d1'
2024-11-27T03:35:24,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-27T03:35:24,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "35364198-fbe3-4b32-b92c-4866c88e63d1"
2024-11-27T03:35:24,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a75adf6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:35:24,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,36843,-1]
2024-11-27T03:35:24,979 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-27T03:35:24,980 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:35:24,982 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50290, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-27T03:35:24,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fe31621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:35:24,984 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:35:24,986 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,42215,1732678523752, seqNum=-1]
2024-11-27T03:35:24,986 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:35:24,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54244, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:35:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=66d3634da959,36843,1732678523551
2024-11-27T03:35:24,990 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:35:24,992 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-27T03:35:24,993 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-27T03:35:24,993 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 66d3634da959,36843,1732678523551
2024-11-27T03:35:24,994 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@13e8f9ed
2024-11-27T03:35:24,994 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-27T03:35:24,995 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-27T03:35:24,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-27T03:35:24,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-11-27T03:35:24,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:35:24,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:24,998 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-11-27T03:35:24,998 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:35:24,998 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4
2024-11-27T03:35:24,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:35:24,999 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-27T03:35:25,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741835_1011 (size=405)
2024-11-27T03:35:25,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741835_1011 (size=405)
2024-11-27T03:35:25,009 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e4ad7d2a05435a9c1ec66673639204a4, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147
2024-11-27T03:35:25,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741836_1012 (size=88)
2024-11-27T03:35:25,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741836_1012 (size=88)
2024-11-27T03:35:25,015 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:35:25,015 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing e4ad7d2a05435a9c1ec66673639204a4, disabling compactions & flushes
2024-11-27T03:35:25,015 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:25,015 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:25,015 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4. after waiting 0 ms
2024-11-27T03:35:25,015 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:25,015 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:25,016 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for e4ad7d2a05435a9c1ec66673639204a4:
    Waiting for close lock at 1732678525015
    Disabling compacts and flushes for region at 1732678525015
    Disabling writes for close at 1732678525015
    Writing region close event to WAL at 1732678525015
    Closed at 1732678525015
2024-11-27T03:35:25,017 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-27T03:35:25,017 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732678525017"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678525017"}]},"ts":"1732678525017"}
2024-11-27T03:35:25,019 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-27T03:35:25,020 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-27T03:35:25,020 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678525020"}]},"ts":"1732678525020"}
2024-11-27T03:35:25,022 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-11-27T03:35:25,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e4ad7d2a05435a9c1ec66673639204a4, ASSIGN}]
2024-11-27T03:35:25,024 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e4ad7d2a05435a9c1ec66673639204a4, ASSIGN
2024-11-27T03:35:25,025 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e4ad7d2a05435a9c1ec66673639204a4, ASSIGN; state=OFFLINE, location=66d3634da959,42215,1732678523752; forceNewPlan=false, retain=false
2024-11-27T03:35:25,176 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4ad7d2a05435a9c1ec66673639204a4, regionState=OPENING, regionLocation=66d3634da959,42215,1732678523752
2024-11-27T03:35:25,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e4ad7d2a05435a9c1ec66673639204a4, ASSIGN because future has completed
2024-11-27T03:35:25,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4ad7d2a05435a9c1ec66673639204a4, server=66d3634da959,42215,1732678523752}]
2024-11-27T03:35:25,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-27T03:35:25,230 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-27T03:35:25,231 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-27T03:35:25,232 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-27T03:35:25,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:25,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:25,342 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:25,342 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e4ad7d2a05435a9c1ec66673639204a4, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.', STARTKEY => '', ENDKEY => ''} 2024-11-27T03:35:25,343 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,343 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:35:25,343 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,343 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,346 INFO [StoreOpener-e4ad7d2a05435a9c1ec66673639204a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,347 INFO [StoreOpener-e4ad7d2a05435a9c1ec66673639204a4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4ad7d2a05435a9c1ec66673639204a4 columnFamilyName info 2024-11-27T03:35:25,347 DEBUG [StoreOpener-e4ad7d2a05435a9c1ec66673639204a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:35:25,348 INFO [StoreOpener-e4ad7d2a05435a9c1ec66673639204a4-1 {}] regionserver.HStore(327): Store=e4ad7d2a05435a9c1ec66673639204a4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T03:35:25,348 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,348 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,349 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,349 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,349 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,351 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,353 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T03:35:25,353 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e4ad7d2a05435a9c1ec66673639204a4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738010, jitterRate=-0.06157180666923523}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-27T03:35:25,353 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4ad7d2a05435a9c1ec66673639204a4 2024-11-27T03:35:25,354 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e4ad7d2a05435a9c1ec66673639204a4: Running coprocessor pre-open hook at 1732678525344Writing region info on filesystem at 1732678525344Initializing all the Stores at 1732678525345 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678525345Cleaning up temporary data from old regions at 1732678525349 (+4 ms)Running coprocessor post-open hooks at 1732678525354 (+5 ms)Region opened successfully at 1732678525354 2024-11-27T03:35:25,355 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4., pid=6, masterSystemTime=1732678525334 2024-11-27T03:35:25,357 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4. 2024-11-27T03:35:25,358 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4. 2024-11-27T03:35:25,359 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4ad7d2a05435a9c1ec66673639204a4, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,42215,1732678523752 2024-11-27T03:35:25,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4ad7d2a05435a9c1ec66673639204a4, server=66d3634da959,42215,1732678523752 because future has completed 2024-11-27T03:35:25,365 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-27T03:35:25,365 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e4ad7d2a05435a9c1ec66673639204a4, server=66d3634da959,42215,1732678523752 in 182 msec 2024-11-27T03:35:25,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-27T03:35:25,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=e4ad7d2a05435a9c1ec66673639204a4, ASSIGN in 343 msec 2024-11-27T03:35:25,368 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-27T03:35:25,368 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678525368"}]},"ts":"1732678525368"} 2024-11-27T03:35:25,370 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-27T03:35:25,371 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-27T03:35:25,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 376 msec 2024-11-27T03:35:25,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:25,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:26,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:26,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:35:26,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[ ... the identical "Failed invocation" WARN and InvocationTargetException/"Filesystem closed" stack trace repeats at 2024-11-27T03:35:26,552 for WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta and at 03:35:27,327 for both MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 and WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 ... ]
2024-11-27T03:35:27,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 after 68077ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
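The frames above show why every one of these WARNs reports an InvocationTargetException rather than the IOException directly: RecoverLeaseFSUtils calls DistributedFileSystem.isFileClosed through java.lang.reflect.Method.invoke (isFileClosed is not on the FileSystem base class), so when DFSClient.checkOpen finds the client already closed, the real "Filesystem closed" error surfaces only as the cause of the reflective wrapper. A minimal sketch of that probe pattern follows; IsFileClosedProbe is an illustrative name, not HBase's actual class, and the behavior on failure is simplified.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      // Probe isFileClosed reflectively: the method exists on
      // DistributedFileSystem but not on the FileSystem base class.
      static boolean isFileClosed(FileSystem fs, Path wal) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, wal);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // probe unavailable on this FileSystem implementation
        } catch (InvocationTargetException e) {
          // The real failure -- here java.io.IOException: Filesystem closed,
          // thrown by DFSClient.checkOpen -- is wrapped as the cause, which
          // is why the log prints "InvocationTargetException: null" first.
          return false;
        }
      }
    }

The attempt=2 ... after 68077ms entry above is consistent with the lease-recovery loop re-issuing recoverLease after its DFS timeout while polling this probe roughly once per second, which is why the same trace recurs at one-second intervals below until the mini-cluster finishes shutting down.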
[ ... the same "Failed invocation" WARN and stack trace continues roughly once per second per file: 03:35:27,531, 03:35:28,532 and 03:35:29,533 for 66d3634da959%2C40479%2C1732678430168.1732678430409; 03:35:27,553, 03:35:28,553 and 03:35:29,554 for the .meta WAL; 03:35:28,329 and 03:35:29,330 for 66d3634da959%2C38459%2C1732678428873.1732678429510; 03:35:28,329 and 03:35:29,330 for the MasterData WAL ... ]
2024-11-27T03:35:29,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
[ ... this WARN repeats 15 more times between 03:35:29,805 and 03:35:29,830 ... ]
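The metric-collection WARNs are a separate, benign symptom of the same teardown: the HBase-Metrics2-1 thread keeps sampling a DataNode's FsDatasetImpl gauges after shutdown has nulled an internal executors map, so each poll trips the quoted NullPointerException. A hypothetical reduction of that failure mode, with illustrative names rather than Hadoop's actual fields:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;

    final class VolumeGauge {
      // Illustrative stand-in for the "this.executors" field named in the
      // log; shutdown nulls it while the metrics thread is still polling.
      private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

      void shutdown() {
        executors = null; // every later poll now hits the NPE seen above
      }

      int pollQueueDepth() {
        // Read the field once, then null-check: the copy-then-check pattern
        // that would avoid the race the log keeps reporting.
        Map<String, ExecutorService> snapshot = executors;
        return snapshot == null ? 0 : snapshot.values().size();
      }
    }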
[ ... the identical "Failed invocation" WARN and stack trace recurs at 03:35:30,331 for 66d3634da959%2C38459%2C1732678428873.1732678429510 and at 03:35:30,332 for the MasterData WAL ... ]
2024-11-27T03:35:30,335 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
[ ... the FsDatasetImpl metric-collection WARN repeats 16 times between 03:35:30,337 and 03:35:30,368 ... ]
2024-11-27T03:35:30,372 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-27T03:35:30,373 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
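The MetricsConfig WARN is harmless in a test run: metrics2 looks for a prefixed hadoop-metrics2-datanode.properties on the classpath first, then the generic hadoop-metrics2.properties, and falls back to defaults when neither exists. If one wanted to silence it, a minimal classpath file along these lines would be picked up; the sink name and output filename here are hypothetical choices, not anything this test uses:

    # Hypothetical minimal hadoop-metrics2.properties for the test classpath.
    # The prefixed hadoop-metrics2-datanode.properties is consulted first.
    *.period=10
    datanode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
    datanode.sink.file.filename=datanode-metrics.out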
[ ... the once-per-second "Failed invocation" WARN and stack trace cycle continues: 03:35:30,534, 03:35:31,535 and 03:35:32,536 for 66d3634da959%2C40479%2C1732678430168.1732678430409; 03:35:30,555, 03:35:31,556 and 03:35:32,556 for the .meta WAL; 03:35:31,333 and 03:35:32,334 for 66d3634da959%2C38459%2C1732678428873.1732678429510; 03:35:31,333 and 03:35:32,334 for the MasterData WAL ... ]
2024-11-27T03:35:33,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:33,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:33,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:33,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:34,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:34,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:34,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:34,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
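Every one of these traces has the same shape: the logged exception is java.lang.reflect.InvocationTargetException: null, and the real failure, java.io.IOException: Filesystem closed out of DFSClient.checkOpen, appears only as the cause. That is plain Java reflection behavior rather than anything HBase-specific: RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed through Method.invoke (hence the GeneratedMethodAccessor frames), and invoke() wraps whatever the target method throws in an InvocationTargetException whose own message is null. A minimal, self-contained sketch of that wrapping, with a stand-in method in place of the HDFS call:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectionWrappingDemo {

        // Stand-in for DistributedFileSystem.isFileClosed(); throws the way
        // DFSClient.checkOpen() does once the client has been shut down.
        public static boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }

        public static void main(String[] args) throws Exception {
            Method m = ReflectionWrappingDemo.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(null, "/some/wal/file");
            } catch (InvocationTargetException e) {
                // The wrapper itself carries no message -- hence
                // "InvocationTargetException: null" in the log.
                System.out.println("wrapper message: " + e.getMessage());
                // The underlying failure is preserved as the cause.
                System.out.println("cause: " + e.getCause());
            }
        }
    }

Running it prints "wrapper message: null" followed by "cause: java.io.IOException: Filesystem closed", which is exactly the pattern in the WARN records above and below.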
2024-11-27T03:35:35,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:35:35,056 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-27T03:35:35,057 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-27T03:35:35,064 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:35,064 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:35,067 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4., hostname=66d3634da959,42215,1732678523752, seqNum=2]
2024-11-27T03:35:35,073 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:35,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:35,079 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-27T03:35:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-27T03:35:35,080 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T03:35:35,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-27T03:35:35,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:35,231 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-27T03:35:35,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42215 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-27T03:35:35,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:35,247 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing e4ad7d2a05435a9c1ec66673639204a4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-27T03:35:35,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/5c3d7b85c7dc46a89e713db0b789bec2 is 1080, key is row0001/info:/1732678535068/Put/seqid=0
2024-11-27T03:35:35,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741837_1013 (size=6033)
2024-11-27T03:35:35,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741837_1013 (size=6033)
2024-11-27T03:35:35,268 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/5c3d7b85c7dc46a89e713db0b789bec2
2024-11-27T03:35:35,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/5c3d7b85c7dc46a89e713db0b789bec2 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5c3d7b85c7dc46a89e713db0b789bec2
2024-11-27T03:35:35,281 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5c3d7b85c7dc46a89e713db0b789bec2, entries=1, sequenceid=5, filesize=5.9 K
2024-11-27T03:35:35,282 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e4ad7d2a05435a9c1ec66673639204a4 in 35ms, sequenceid=5, compaction requested=false
2024-11-27T03:35:35,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for e4ad7d2a05435a9c1ec66673639204a4:
2024-11-27T03:35:35,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:35,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-27T03:35:35,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-27T03:35:35,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-27T03:35:35,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec
2024-11-27T03:35:35,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec
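The records above are the complete round trip of a client-requested flush: the CREATE procedure (pid=4) completes, the test locates the table's single region, and a flush call on the master fans out through FlushTableProcedure (pid=7) to a FlushRegionProcedure (pid=8) on the region server, which writes the 1.05 KB memstore out as hfile 5c3d7b85c7dc46a89e713db0b789bec2 and reports back in about 217 ms. A minimal sketch of issuing the same flush from client code; the ZooKeeper quorum value is an assumption, since in the test it comes from the mini-cluster's own configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "localhost"); // assumption: mini-cluster address
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table =
                    TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
                // Synchronous flush: returns after the master-side flush
                // procedure (the pid=7/pid=8 chain above) has completed.
                admin.flush(table);
            }
        }
    }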
2024-11-27T03:35:35,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 [stack trace identical to the InvocationTargetException/"Filesystem closed" trace shown above]
[The same WARN and trace recur at 03:35:35,338 (…1732678429510), 03:35:35,540 (…1732678430409), 03:35:35,560 (the .meta WAL), 03:35:36,339 (the MasterData WAL and …1732678429510), and 03:35:36,541 (…1732678430409).]
2024-11-27T03:35:36,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 after 68073ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
[Another Failed invocation WARN with the identical InvocationTargetException trace follows at 03:35:36,560 for the .meta WAL.]
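Unlike the per-second Failed invocation records, these attempt=N lines come from the outer retry loop in RecoverLeaseFSUtils.recoverDFSFileLease: recoverLease is re-issued against the NameNode on each attempt, with isFileClosed polled in between, and the elapsed time (68073 ms here) is logged per attempt; in this run both calls keep failing because the DFSClient underneath has already been closed. A simplified sketch of that retry pattern, assuming a live DistributedFileSystem handle; the real utility goes through reflection and adds timed pauses and timeouts, so this is an illustration rather than the actual implementation:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
        // Returns true once the NameNode reports the file closed and its
        // lease released; false if maxAttempts run out first.
        public static boolean recover(DistributedFileSystem dfs, Path wal,
                                      int maxAttempts, long pauseMs)
                throws Exception {
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                // Ask the NameNode to start (or re-trigger) lease recovery;
                // true means the file is already closed.
                if (dfs.recoverLease(wal)) {
                    return true;
                }
                Thread.sleep(pauseMs);
                // Poll for completion before issuing the next attempt.
                if (dfs.isFileClosed(wal)) {
                    return true;
                }
            }
            return false;
        }
    }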
2024-11-27T03:35:36,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta after 68083ms [stack trace identical to the attempt=2 record above]
[The per-second Failed invocation WARN and its identical trace continue at 03:35:37,340 (the MasterData WAL and …1732678429510), 03:35:37,543 (…1732678430409), 03:35:37,562 (the .meta WAL), and 03:35:38,341 (…1732678429510).]
2024-11-27T03:35:38,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:38,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:38,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:39,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:39,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:39,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:39,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:40,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:40,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:40,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:40,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:41,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:41,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:41,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:41,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:42,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:42,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:42,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:42,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:43,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:43,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:43,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
2024-11-27T03:35:43,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:44,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:44,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:44,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:35:44,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:45,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-27T03:35:45,146 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-27T03:35:45,154 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:45,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:45,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-27T03:35:45,160 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-27T03:35:45,161 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T03:35:45,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-27T03:35:45,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42215 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-27T03:35:45,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:45,318 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing e4ad7d2a05435a9c1ec66673639204a4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-27T03:35:45,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/73cf760a95174e278451c4c06df90176 is 1080, key is row0002/info:/1732678545149/Put/seqid=0
2024-11-27T03:35:45,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741838_1014 (size=6033)
2024-11-27T03:35:45,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741838_1014 (size=6033)
2024-11-27T03:35:45,334 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/73cf760a95174e278451c4c06df90176
2024-11-27T03:35:45,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/73cf760a95174e278451c4c06df90176 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/73cf760a95174e278451c4c06df90176
2024-11-27T03:35:45,348 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/73cf760a95174e278451c4c06df90176, entries=1, sequenceid=9, filesize=5.9 K
2024-11-27T03:35:45,349 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e4ad7d2a05435a9c1ec66673639204a4 in 31ms, sequenceid=9, compaction requested=false
2024-11-27T03:35:45,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for e4ad7d2a05435a9c1ec66673639204a4:
2024-11-27T03:35:45,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:45,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-27T03:35:45,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:45,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:45,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-27T03:35:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-27T03:35:45,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 190 msec
2024-11-27T03:35:45,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 200 msec
2024-11-27T03:35:45,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:35:45,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:46,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:46,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:46,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:35:46,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:47,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:47,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:47,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:35:47,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:48,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:48,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:48,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:35:48,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:49,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:49,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:49,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:35:49,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:35:50,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:35:50,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:50,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:50,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:51,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:51,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:51,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:51,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:52,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:52,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:52,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:52,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:53,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:53,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:53,533 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-27T03:35:53,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:53,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:54,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:54,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:54,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:54,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
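The InvocationTargetException wrapper in these traces is a reflection artifact: isFileClosed is a DistributedFileSystem-specific method rather than part of the generic FileSystem API, so RecoverLeaseFSUtils looks it up and calls it via Method.invoke (presumably so the code also runs against filesystems that lack the method), and the real failure -- java.io.IOException: Filesystem closed, thrown because the mini-cluster's DFSClient has already been shut down -- surfaces only as the cause. A minimal sketch of that probe pattern follows; the class and method shown are illustrative, not HBase's actual code:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public final class IsFileClosedProbe {

    /**
     * Reflectively asks fs whether the file at path is closed.
     * Returns false when the method is missing or the call fails,
     * treating "cannot tell" the same as "not closed yet".
     */
    static boolean isFileClosed(Object fs, Object path) {
        try {
            // Look the method up by name so this compiles and loads even
            // against filesystem classes that do not declare isFileClosed.
            Method m = fs.getClass().getMethod("isFileClosed", path.getClass());
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException | IllegalAccessException e) {
            return false; // probe unsupported on this filesystem
        } catch (InvocationTargetException e) {
            // The exception thrown inside the target method is the cause --
            // in the log above, "java.io.IOException: Filesystem closed".
            System.err.println("Failed invocation: " + e.getCause());
            return false;
        }
    }
}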
2024-11-27T03:35:55,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-27T03:35:55,196 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-27T03:35:55,203 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C42215%2C1732678523752.1732678555203
2024-11-27T03:35:55,208 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:55,209 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:55,209 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:55,209 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:55,209 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:35:55,209 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678524363 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678555203
2024-11-27T03:35:55,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741833_1009 (size=5546)
2024-11-27T03:35:55,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741833_1009 (size=5546)
2024-11-27T03:35:55,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42537:42537),(127.0.0.1/127.0.0.1:45029:45029)]
2024-11-27T03:35:55,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:55,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:35:55,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-27T03:35:55,215 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-27T03:35:55,216 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T03:35:55,216 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
(Two more lease-recovery retries logged the identical "Filesystem closed" stack trace:)
2024-11-27T03:35:55,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:35:55,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
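Both the WAL roll and the flush recorded above are driveable from the HBase client API: Admin.flush submits the master-side FlushTableProcedure (pid=11 here), which fans out one FlushRegionProcedure per region (pid=12), and Admin.rollWALWriter asks a region server to roll its WAL. A minimal sketch, assuming a reachable cluster configuration; the table and server names are copied from the log, and the class name is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushAndRollExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Triggers a master-side FlushTableProcedure like pid=11 above.
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            // Rolls the WAL of one region server (host,port,startcode),
            // as the test does before issuing the flush.
            admin.rollWALWriter(ServerName.valueOf("66d3634da959,42215,1732678523752"));
        }
    }
}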
2024-11-27T03:35:55,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42215 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-27T03:35:55,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:55,371 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing e4ad7d2a05435a9c1ec66673639204a4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-27T03:35:55,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/f800aaf025934e2ba3530e2ecaf49e35 is 1080, key is row0003/info:/1732678555200/Put/seqid=0
2024-11-27T03:35:55,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741840_1016 (size=6033)
2024-11-27T03:35:55,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741840_1016 (size=6033)
2024-11-27T03:35:55,388 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/f800aaf025934e2ba3530e2ecaf49e35
2024-11-27T03:35:55,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/f800aaf025934e2ba3530e2ecaf49e35 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/f800aaf025934e2ba3530e2ecaf49e35
2024-11-27T03:35:55,402 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/f800aaf025934e2ba3530e2ecaf49e35, entries=1, sequenceid=13, filesize=5.9 K
2024-11-27T03:35:55,403 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e4ad7d2a05435a9c1ec66673639204a4 in 32ms, sequenceid=13, compaction requested=true
2024-11-27T03:35:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for e4ad7d2a05435a9c1ec66673639204a4:
2024-11-27T03:35:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:35:55,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-27T03:35:55,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-27T03:35:55,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-27T03:35:55,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec
2024-11-27T03:35:55,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 196 msec
2024-11-27T03:35:55,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
11 more 2024-11-27T03:35:55,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:56,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:56,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:56,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:56,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:57,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:57,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:57,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:57,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:58,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:58,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:58,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:58,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:59,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:59,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:35:59,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:35:59,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:00,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:00,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:00,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:00,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:01,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:01,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:01,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
11 more 2024-11-27T03:36:04,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:04,905 INFO [master/66d3634da959:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-27T03:36:04,905 INFO [master/66d3634da959:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-27T03:36:05,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-27T03:36:05,285 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-27T03:36:05,286 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:05,290 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:05,290 DEBUG [Time-limited test {}] regionserver.HStore(1541): e4ad7d2a05435a9c1ec66673639204a4/info is initiating minor compaction (all files)
2024-11-27T03:36:05,290 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-27T03:36:05,290 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-27T03:36:05,291 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of e4ad7d2a05435a9c1ec66673639204a4/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:05,291 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5c3d7b85c7dc46a89e713db0b789bec2, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/73cf760a95174e278451c4c06df90176, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/f800aaf025934e2ba3530e2ecaf49e35] into tmpdir=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp, totalSize=17.7 K
2024-11-27T03:36:05,292 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5c3d7b85c7dc46a89e713db0b789bec2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732678535068
2024-11-27T03:36:05,293 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 73cf760a95174e278451c4c06df90176, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732678545149
2024-11-27T03:36:05,293 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f800aaf025934e2ba3530e2ecaf49e35, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732678555200
2024-11-27T03:36:05,308 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): e4ad7d2a05435a9c1ec66673639204a4#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:05,309 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/d57892beb356448cb220c70d051caeec is 1080, key is row0001/info:/1732678535068/Put/seqid=0
2024-11-27T03:36:05,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741841_1017 (size=8296)
2024-11-27T03:36:05,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741841_1017 (size=8296)
2024-11-27T03:36:05,378 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/d57892beb356448cb220c70d051caeec as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/d57892beb356448cb220c70d051caeec
2024-11-27T03:36:05,384 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e4ad7d2a05435a9c1ec66673639204a4/info of e4ad7d2a05435a9c1ec66673639204a4 into d57892beb356448cb220c70d051caeec(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:05,384 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for e4ad7d2a05435a9c1ec66673639204a4:
2024-11-27T03:36:05,387 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C42215%2C1732678523752.1732678565386
2024-11-27T03:36:05,392 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:05,393 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:05,393 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:05,393 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:05,393 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:05,393 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678555203 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678565386
2024-11-27T03:36:05,394 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42537:42537),(127.0.0.1/127.0.0.1:45029:45029)]
2024-11-27T03:36:05,394 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678555203 is not closed yet, will try archiving it next time
2024-11-27T03:36:05,394 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678524363 to hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/oldWALs/66d3634da959%2C42215%2C1732678523752.1732678524363
2024-11-27T03:36:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741839_1015 (size=2520)
2024-11-27T03:36:05,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:36:05,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741839_1015 (size=2520)
2024-11-27T03:36:05,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-27T03:36:05,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-27T03:36:05,398 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-27T03:36:05,399 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-27T03:36:05,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-27T03:36:05,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42215 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-27T03:36:05,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:05,553 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing e4ad7d2a05435a9c1ec66673639204a4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-27T03:36:05,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/5dac3df2876e443caea38429e9b35987 is 1080, key is row0000/info:/1732678565385/Put/seqid=0
2024-11-27T03:36:05,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741843_1019 (size=6033)
2024-11-27T03:36:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741843_1019 (size=6033)
2024-11-27T03:36:05,566 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/5dac3df2876e443caea38429e9b35987
2024-11-27T03:36:05,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/5dac3df2876e443caea38429e9b35987 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5dac3df2876e443caea38429e9b35987
2024-11-27T03:36:05,578 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5dac3df2876e443caea38429e9b35987, entries=1, sequenceid=18, filesize=5.9 K
2024-11-27T03:36:05,580 INFO [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e4ad7d2a05435a9c1ec66673639204a4 in 27ms, sequenceid=18, compaction requested=false
2024-11-27T03:36:05,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for e4ad7d2a05435a9c1ec66673639204a4:
2024-11-27T03:36:05,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:05,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-27T03:36:05,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-27T03:36:05,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-27T03:36:05,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 182 msec
2024-11-27T03:36:05,586 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec
2024-11-27T03:36:07,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:07,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:07,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:07,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:08,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:08,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:08,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:08,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:09,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:09,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:09,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:09,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:10,343 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e4ad7d2a05435a9c1ec66673639204a4, had cached 0 bytes from a total of 14329 2024-11-27T03:36:10,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:10,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:10,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:10,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:11,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:11,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:11,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:11,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:12,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:12,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:12,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:12,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:13,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:13,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:13,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:13,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:14,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:14,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:14,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:14,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:15,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:15,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:15,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-27T03:36:15,507 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-27T03:36:15,510 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C42215%2C1732678523752.1732678575510
2024-11-27T03:36:15,523 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:15,523 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:15,523 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:15,523 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:15,524 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:15,524 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678565386 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678575510
2024-11-27T03:36:15,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741842_1018 (size=2026)
2024-11-27T03:36:15,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741842_1018 (size=2026)
2024-11-27T03:36:15,525 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/WALs/66d3634da959,42215,1732678523752/66d3634da959%2C42215%2C1732678523752.1732678555203 to hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/oldWALs/66d3634da959%2C42215%2C1732678523752.1732678555203
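The repeated WARNs above are HBase's WAL lease recovery polling DistributedFileSystem.isFileClosed() through reflection after the test's DFS client has already been shut down: every probe throws IOException("Filesystem closed"), and reflection surfaces it wrapped in InvocationTargetException, which is exactly what RecoverLeaseFSUtils logs. A minimal Java sketch of such a reflective probe, assuming only the public Hadoop FileSystem API; the helper class and method names below are illustrative, not the RecoverLeaseFSUtils source:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
        // Reflectively asks the filesystem whether a file's lease is released.
        // Returns false when the probe is unavailable or fails, so the caller
        // can keep polling, which is the once-per-second retry loop visible in
        // the WARNs above.
        static boolean probeIsFileClosed(FileSystem fs, Path path) {
            try {
                // DistributedFileSystem exposes isFileClosed(Path); the plain
                // FileSystem base class does not, hence the reflective lookup.
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) isFileClosed.invoke(fs, path);
            } catch (NoSuchMethodException | IllegalAccessException e) {
                return false; // probe not supported on this FileSystem
            } catch (InvocationTargetException e) {
                // A closed DFSClient throws IOException("Filesystem closed");
                // reflection delivers it wrapped, exactly as logged above.
                return false;
            }
        }
    }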
2024-11-27T03:36:15,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45029:45029),(127.0.0.1/127.0.0.1:42537:42537)]
2024-11-27T03:36:15,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-27T03:36:15,528 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:36:15,528 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:36:15,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:15,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:15,529 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-27T03:36:15,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-27T03:36:15,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=999169588, stopped=false
2024-11-27T03:36:15,529 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,36843,1732678523551
2024-11-27T03:36:15,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:36:15,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:36:15,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:15,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:15,560 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:36:15,560 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
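The call stack logged above records the teardown path: the JUnit @After hook in AbstractTestLogRolling calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection and then stops the cluster. A minimal sketch of that lifecycle, assuming the standard HBaseTestingUtil API visible in the stack; the test class name below is hypothetical:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            // Starts an in-process master, region server, ZooKeeper and mini DFS.
            util.startMiniCluster();
        }

        @After
        public void tearDown() throws Exception {
            // Closes the shared connection (the "Connection has been closed by
            // Time-limited test" DEBUG above) and stops master and region servers.
            util.shutdownMiniCluster();
        }
    }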
2024-11-27T03:36:15,560 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:36:15,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:15,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
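This second call stack shows HMaster.shutdown() being reached through JVMClusterUtil, the in-process path used by the mini cluster. Outside a test, the equivalent cluster-wide stop can be requested through the public Admin API; a hedged sketch, assuming connection configuration is picked up from the classpath:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class ClusterShutdownSketch {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection();
                 Admin admin = connection.getAdmin()) {
                // Asks the active master to stop itself and all region servers,
                // the same master-side shutdown the stack above runs in-process.
                admin.shutdown();
            }
        }
    }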
2024-11-27T03:36:15,560 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,42215,1732678523752' *****
2024-11-27T03:36:15,560 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:36:15,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:36:15,560 INFO [RS:0;66d3634da959:42215 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:36:15,561 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(3091): Received CLOSE for e4ad7d2a05435a9c1ec66673639204a4
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,42215,1732678523752
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:42215.
2024-11-27T03:36:15,561 DEBUG [RS:0;66d3634da959:42215 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:36:15,561 DEBUG [RS:0;66d3634da959:42215 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:15,561 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e4ad7d2a05435a9c1ec66673639204a4, disabling compactions & flushes
2024-11-27T03:36:15,561 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-27T03:36:15,561 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-27T03:36:15,561 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4. after waiting 0 ms
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-27T03:36:15,561 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-27T03:36:15,561 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e4ad7d2a05435a9c1ec66673639204a4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-27T03:36:15,561 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-27T03:36:15,561 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, e4ad7d2a05435a9c1ec66673639204a4=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.}
2024-11-27T03:36:15,561 DEBUG [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e4ad7d2a05435a9c1ec66673639204a4
2024-11-27T03:36:15,562 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:36:15,562 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:36:15,562 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:36:15,562 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:36:15,562 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:36:15,562 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB
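The "Flushing ... column families" lines show each closing region writing its memstore out as store files before the close completes. The same flush can also be requested explicitly through the Admin API, which is what produced the earlier "Operation: FLUSH ... completed" INFO; a small sketch, again assuming classpath configuration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection();
                 Admin admin = connection.getAdmin()) {
                // Submits a FLUSH procedure on the master and waits for it to
                // finish, persisting each region's memstore as a new HFile.
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }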
2024-11-27T03:36:15,569 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/cb0bf43334d64e728b3f32264d391dd4 is 1080, key is row0001/info:/1732678575508/Put/seqid=0
2024-11-27T03:36:15,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741845_1021 (size=6033)
2024-11-27T03:36:15,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741845_1021 (size=6033)
2024-11-27T03:36:15,574 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/cb0bf43334d64e728b3f32264d391dd4
2024-11-27T03:36:15,580 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/.tmp/info/cb0bf43334d64e728b3f32264d391dd4 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/cb0bf43334d64e728b3f32264d391dd4
2024-11-27T03:36:15,581 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/info/9ee11569316f4fceb5c826a373add7cd is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4./info:regioninfo/1732678525358/Put/seqid=0
2024-11-27T03:36:15,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:15,585 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/cb0bf43334d64e728b3f32264d391dd4, entries=1, sequenceid=22, filesize=5.9 K
2024-11-27T03:36:15,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741846_1022 (size=7308)
2024-11-27T03:36:15,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741846_1022 (size=7308)
2024-11-27T03:36:15,586 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/info/9ee11569316f4fceb5c826a373add7cd
2024-11-27T03:36:15,586 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e4ad7d2a05435a9c1ec66673639204a4 in 25ms, sequenceid=22, compaction requested=true
2024-11-27T03:36:15,587 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5c3d7b85c7dc46a89e713db0b789bec2, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/73cf760a95174e278451c4c06df90176, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/f800aaf025934e2ba3530e2ecaf49e35] to archive
2024-11-27T03:36:15,587 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
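The StoreCloser lines around here move now-compacted store files out of the region's data directory and into the matching archive tree. At its core that archival is a rename within the same HDFS namespace; a hedged Java sketch with hypothetical paths shaped like the ones in the log (the real HFileArchiver also handles name collisions and retries):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ArchiveStoreFileSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            // Hypothetical paths: data/<ns>/<table>/<region>/<family>/<hfile>
            Path storeFile = new Path("/hbase/data/default/t/region/info/5c3d7b85c7dc46a89e713db0b789bec2");
            Path archiveDir = new Path("/hbase/archive/data/default/t/region/info");
            fs.mkdirs(archiveDir);
            // Within one HDFS namespace a rename is a metadata-only move.
            if (!fs.rename(storeFile, new Path(archiveDir, storeFile.getName()))) {
                throw new IOException("Failed to archive " + storeFile);
            }
        }
    }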
2024-11-27T03:36:15,589 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5c3d7b85c7dc46a89e713db0b789bec2 to hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/5c3d7b85c7dc46a89e713db0b789bec2
2024-11-27T03:36:15,590 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/73cf760a95174e278451c4c06df90176 to hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/73cf760a95174e278451c4c06df90176
2024-11-27T03:36:15,591 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/f800aaf025934e2ba3530e2ecaf49e35 to hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/info/f800aaf025934e2ba3530e2ecaf49e35
2024-11-27T03:36:15,592 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=66d3634da959:36843 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-11-27T03:36:15,592 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5c3d7b85c7dc46a89e713db0b789bec2=6033, 73cf760a95174e278451c4c06df90176=6033, f800aaf025934e2ba3530e2ecaf49e35=6033]
2024-11-27T03:36:15,596 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/e4ad7d2a05435a9c1ec66673639204a4/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-11-27T03:36:15,596 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:15,596 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e4ad7d2a05435a9c1ec66673639204a4: Waiting for close lock at 1732678575561Running coprocessor pre-close hooks at 1732678575561Disabling compacts and flushes for region at 1732678575561Disabling writes for close at 1732678575561Obtaining lock to block concurrent updates at 1732678575561Preparing flush snapshotting stores in e4ad7d2a05435a9c1ec66673639204a4 at 1732678575561Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732678575562 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4. at 1732678575562Flushing e4ad7d2a05435a9c1ec66673639204a4/info: creating writer at 1732678575562Flushing e4ad7d2a05435a9c1ec66673639204a4/info: appending metadata at 1732678575568 (+6 ms)Flushing e4ad7d2a05435a9c1ec66673639204a4/info: closing flushed file at 1732678575568Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c6c2e69: reopening flushed file at 1732678575579 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for e4ad7d2a05435a9c1ec66673639204a4 in 25ms, sequenceid=22, compaction requested=true at 1732678575586 (+7 ms)Writing region close event to WAL at 1732678575592 (+6 ms)Running coprocessor post-close hooks at 1732678575596 (+4 ms)Closed at 1732678575596
2024-11-27T03:36:15,596 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732678524995.e4ad7d2a05435a9c1ec66673639204a4.
2024-11-27T03:36:15,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:15,605 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/ns/293efc29e19a4336a94f934839665c0f is 43, key is default/ns:d/1732678524890/Put/seqid=0 2024-11-27T03:36:15,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741847_1023 (size=5153) 2024-11-27T03:36:15,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741847_1023 (size=5153) 2024-11-27T03:36:15,610 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/ns/293efc29e19a4336a94f934839665c0f 2024-11-27T03:36:15,627 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/table/13db0c6e062f4ac5a8ea1b3509d30f7e is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732678525368/Put/seqid=0 2024-11-27T03:36:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741848_1024 (size=5508) 2024-11-27T03:36:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741848_1024 
(size=5508) 2024-11-27T03:36:15,632 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/table/13db0c6e062f4ac5a8ea1b3509d30f7e 2024-11-27T03:36:15,637 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/info/9ee11569316f4fceb5c826a373add7cd as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/info/9ee11569316f4fceb5c826a373add7cd 2024-11-27T03:36:15,643 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/info/9ee11569316f4fceb5c826a373add7cd, entries=10, sequenceid=11, filesize=7.1 K 2024-11-27T03:36:15,643 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/ns/293efc29e19a4336a94f934839665c0f as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/ns/293efc29e19a4336a94f934839665c0f 2024-11-27T03:36:15,648 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/ns/293efc29e19a4336a94f934839665c0f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-27T03:36:15,649 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/.tmp/table/13db0c6e062f4ac5a8ea1b3509d30f7e as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/table/13db0c6e062f4ac5a8ea1b3509d30f7e 2024-11-27T03:36:15,653 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/table/13db0c6e062f4ac5a8ea1b3509d30f7e, entries=2, sequenceid=11, filesize=5.4 K 2024-11-27T03:36:15,654 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=11, compaction requested=false 2024-11-27T03:36:15,659 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-27T03:36:15,660 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:36:15,660 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:36:15,660 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678575562Running coprocessor pre-close hooks at 1732678575562Disabling compacts and flushes for region at 1732678575562Disabling writes for close at 1732678575562Obtaining lock to block concurrent updates at 1732678575562Preparing flush snapshotting stores in 1588230740 at 1732678575562Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732678575562Flushing stores of hbase:meta,,1.1588230740 at 1732678575563 (+1 ms)Flushing 1588230740/info: creating writer at 1732678575563Flushing 1588230740/info: appending metadata at 1732678575581 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732678575581Flushing 1588230740/ns: creating writer at 1732678575591 (+10 ms)Flushing 1588230740/ns: appending metadata at 1732678575605 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732678575605Flushing 1588230740/table: creating writer at 1732678575614 (+9 ms)Flushing 1588230740/table: appending metadata at 1732678575627 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732678575627Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f3d56d1: reopening flushed file at 1732678575636 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c4ae34: reopening flushed file at 1732678575643 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@557c5489: reopening flushed file at 1732678575648 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 92ms, sequenceid=11, compaction requested=false at 1732678575654 (+6 ms)Writing region close event to WAL at 1732678575656 (+2 ms)Running coprocessor post-close hooks at 1732678575659 (+3 ms)Closed at 1732678575660 (+1 ms) 2024-11-27T03:36:15,660 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T03:36:15,762 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,42215,1732678523752; all regions closed. 
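The two "Region close journal" entries above are single-line audit trails: each close phase is recorded with a wall-clock stamp and rendered with a millisecond delta relative to the previous phase. A minimal sketch of that pattern in plain Java follows; the class name and rendering are illustrative, not HBase's actual implementation.

    import java.util.ArrayList;
    import java.util.List;

    // Accumulates timestamped status events and renders them run together
    // with "(+N ms)" deltas, like the "Region close journal" lines above.
    final class CloseJournal {
        private record Entry(String status, long atMillis) {}
        private final List<Entry> entries = new ArrayList<>();

        void record(String status) {
            entries.add(new Entry(status, System.currentTimeMillis()));
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            long prev = -1;
            for (Entry e : entries) {
                sb.append(e.status()).append(" at ").append(e.atMillis());
                if (prev >= 0 && e.atMillis() > prev) {
                    sb.append(" (+").append(e.atMillis() - prev).append(" ms)");
                }
                prev = e.atMillis();
            }
            return sb.toString();
        }
    }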
2024-11-27T03:36:15,763 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,763 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,763 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,763 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,763 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741834_1010 (size=3306) 2024-11-27T03:36:15,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741834_1010 (size=3306) 2024-11-27T03:36:15,772 DEBUG [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/oldWALs 2024-11-27T03:36:15,772 INFO [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C42215%2C1732678523752.meta:.meta(num 1732678524795) 2024-11-27T03:36:15,773 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,773 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,773 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,773 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,773 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741844_1020 (size=1252) 2024-11-27T03:36:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741844_1020 (size=1252) 2024-11-27T03:36:15,781 DEBUG [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/oldWALs 2024-11-27T03:36:15,781 INFO [RS:0;66d3634da959:42215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C42215%2C1732678523752:(num 1732678575510) 2024-11-27T03:36:15,781 DEBUG [RS:0;66d3634da959:42215 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:36:15,781 INFO [RS:0;66d3634da959:42215 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:36:15,781 INFO [RS:0;66d3634da959:42215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:36:15,782 INFO [RS:0;66d3634da959:42215 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-27T03:36:15,782 INFO [RS:0;66d3634da959:42215 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:36:15,782 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
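The WAL shutdown sequence above is: interrupt the five FSHLog sync runners, then move the remaining WAL file(s) into oldWALs and log the closed WAL. A hedged sketch of that ordering using plain JDK types only (this is not HBase's FSHLog code; names and paths are placeholders):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.List;

    final class WalCloser {
        // Interrupt the sync workers first so nothing is mid-append, wait for
        // them to exit (each logs "interrupted"), then archive the WAL files.
        static void close(List<Thread> syncRunners, List<Path> walFiles, Path oldWals)
                throws IOException, InterruptedException {
            for (Thread t : syncRunners) t.interrupt();
            for (Thread t : syncRunners) t.join();
            Files.createDirectories(oldWals);
            for (Path wal : walFiles) {
                // same-filesystem move, mirroring "Moved N WAL file(s) to .../oldWALs"
                Files.move(wal, oldWals.resolve(wal.getFileName()),
                        StandardCopyOption.ATOMIC_MOVE);
            }
        }
    }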
2024-11-27T03:36:15,782 INFO [RS:0;66d3634da959:42215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42215 2024-11-27T03:36:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,42215,1732678523752 2024-11-27T03:36:15,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:36:15,795 INFO [RS:0;66d3634da959:42215 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:36:15,806 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,42215,1732678523752] 2024-11-27T03:36:15,816 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,42215,1732678523752 already deleted, retry=false 2024-11-27T03:36:15,816 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,42215,1732678523752 expired; onlineServers=0 2024-11-27T03:36:15,816 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,36843,1732678523551' ***** 2024-11-27T03:36:15,816 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-27T03:36:15,816 INFO [M:0;66d3634da959:36843 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-27T03:36:15,816 INFO [M:0;66d3634da959:36843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:36:15,816 DEBUG [M:0;66d3634da959:36843 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-27T03:36:15,816 DEBUG [M:0;66d3634da959:36843 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-27T03:36:15,816 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
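The NodeDeleted event above is how the master learns the region server is gone: the server holds an ephemeral znode under /hbase/rs, and its deletion fires a watch that triggers expiration processing. A minimal sketch with the stock ZooKeeper client API (quorum string and znode path copied from the log; the handler body is illustrative, not RegionServerTracker's code):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    final class RsDeathWatch {
        static void watch()
                throws java.io.IOException, KeeperException, InterruptedException {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55892", 30_000, event -> {
                if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                    // Ephemeral node vanished: the session that created it died,
                    // so treat that server as expired (cf. RegionServerTracker).
                    System.out.println("expired: " + event.getPath());
                }
            });
            // arm a one-shot watch on the server's ephemeral znode
            zk.exists("/hbase/rs/66d3634da959,42215,1732678523752", true);
        }
    }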
2024-11-27T03:36:15,816 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678524134 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678524134,5,FailOnTimeoutGroup] 2024-11-27T03:36:15,816 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678524134 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678524134,5,FailOnTimeoutGroup] 2024-11-27T03:36:15,817 INFO [M:0;66d3634da959:36843 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-27T03:36:15,817 INFO [M:0;66d3634da959:36843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:36:15,817 DEBUG [M:0;66d3634da959:36843 {}] master.HMaster(1795): Stopping service threads 2024-11-27T03:36:15,817 INFO [M:0;66d3634da959:36843 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-27T03:36:15,817 INFO [M:0;66d3634da959:36843 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-27T03:36:15,817 INFO [M:0;66d3634da959:36843 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-27T03:36:15,817 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-27T03:36:15,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-27T03:36:15,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:36:15,827 DEBUG [M:0;66d3634da959:36843 {}] zookeeper.ZKUtil(347): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-27T03:36:15,827 WARN [M:0;66d3634da959:36843 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-27T03:36:15,827 INFO [M:0;66d3634da959:36843 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/.lastflushedseqids 2024-11-27T03:36:15,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741849_1025 (size=130) 2024-11-27T03:36:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741849_1025 (size=130) 2024-11-27T03:36:15,835 INFO [M:0;66d3634da959:36843 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-27T03:36:15,836 INFO [M:0;66d3634da959:36843 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-27T03:36:15,836 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-27T03:36:15,836 INFO [M:0;66d3634da959:36843 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:36:15,836 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:36:15,836 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-27T03:36:15,836 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-27T03:36:15,836 INFO [M:0;66d3634da959:36843 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.89 KB 2024-11-27T03:36:15,853 DEBUG [M:0;66d3634da959:36843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8c4ecf49bf3b4fdda70904a9a3e01e34 is 82, key is hbase:meta,,1/info:regioninfo/1732678524824/Put/seqid=0 2024-11-27T03:36:15,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741850_1026 (size=5672) 2024-11-27T03:36:15,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741850_1026 (size=5672) 2024-11-27T03:36:15,858 INFO [M:0;66d3634da959:36843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8c4ecf49bf3b4fdda70904a9a3e01e34 2024-11-27T03:36:15,876 DEBUG [M:0;66d3634da959:36843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ca25280ecdce47ba9dc2e19df0dfced5 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732678525372/Put/seqid=0 2024-11-27T03:36:15,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741851_1027 (size=7817) 2024-11-27T03:36:15,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741851_1027 (size=7817) 2024-11-27T03:36:15,881 INFO [M:0;66d3634da959:36843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.93 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ca25280ecdce47ba9dc2e19df0dfced5 2024-11-27T03:36:15,885 INFO [M:0;66d3634da959:36843 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ca25280ecdce47ba9dc2e19df0dfced5 2024-11-27T03:36:15,899 DEBUG [M:0;66d3634da959:36843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3820929f172541909e6660d010f5747b is 69, key is 66d3634da959,42215,1732678523752/rs:state/1732678524209/Put/seqid=0 
2024-11-27T03:36:15,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741852_1028 (size=5156) 2024-11-27T03:36:15,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741852_1028 (size=5156) 2024-11-27T03:36:15,904 INFO [M:0;66d3634da959:36843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3820929f172541909e6660d010f5747b 2024-11-27T03:36:15,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:36:15,906 INFO [RS:0;66d3634da959:42215 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:36:15,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42215-0x1017a5c163a0001, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:36:15,906 INFO [RS:0;66d3634da959:42215 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,42215,1732678523752; zookeeper connection closed. 2024-11-27T03:36:15,906 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@39a92ea1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@39a92ea1 2024-11-27T03:36:15,906 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-27T03:36:15,928 DEBUG [M:0;66d3634da959:36843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2bfa5e8004774ba99ae8cb813b236bd3 is 52, key is load_balancer_on/state:d/1732678524991/Put/seqid=0 2024-11-27T03:36:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741853_1029 (size=5056) 2024-11-27T03:36:15,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741853_1029 (size=5056) 2024-11-27T03:36:15,933 INFO [M:0;66d3634da959:36843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2bfa5e8004774ba99ae8cb813b236bd3 2024-11-27T03:36:15,939 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8c4ecf49bf3b4fdda70904a9a3e01e34 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8c4ecf49bf3b4fdda70904a9a3e01e34 2024-11-27T03:36:15,943 INFO [M:0;66d3634da959:36843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8c4ecf49bf3b4fdda70904a9a3e01e34, entries=8, sequenceid=121, filesize=5.5 K 2024-11-27T03:36:15,944 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ca25280ecdce47ba9dc2e19df0dfced5 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ca25280ecdce47ba9dc2e19df0dfced5 2024-11-27T03:36:15,949 INFO [M:0;66d3634da959:36843 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ca25280ecdce47ba9dc2e19df0dfced5 2024-11-27T03:36:15,949 INFO [M:0;66d3634da959:36843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ca25280ecdce47ba9dc2e19df0dfced5, entries=14, sequenceid=121, filesize=7.6 K 2024-11-27T03:36:15,950 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3820929f172541909e6660d010f5747b as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3820929f172541909e6660d010f5747b 2024-11-27T03:36:15,955 INFO [M:0;66d3634da959:36843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3820929f172541909e6660d010f5747b, entries=1, sequenceid=121, filesize=5.0 K 2024-11-27T03:36:15,956 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2bfa5e8004774ba99ae8cb813b236bd3 as hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2bfa5e8004774ba99ae8cb813b236bd3 2024-11-27T03:36:15,961 INFO [M:0;66d3634da959:36843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43791/user/jenkins/test-data/11c10bf4-3e82-e4dd-ec97-dcb20158d147/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2bfa5e8004774ba99ae8cb813b236bd3, entries=1, sequenceid=121, filesize=4.9 K 2024-11-27T03:36:15,962 INFO [M:0;66d3634da959:36843 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=121, compaction requested=false 2024-11-27T03:36:15,963 INFO [M:0;66d3634da959:36843 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
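Each "Committing .tmp/<file> as <store>/<file>" pair above is the write-to-temp-then-rename commit: the flushed file is fully written under .tmp, then renamed into the store directory so readers never observe a partial file. A sketch of that step with the Hadoop FileSystem API (class and method names here are placeholders, not HRegionFileSystem's):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class FlushCommit {
        static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
            Path dst = new Path(storeDir, tmpFile.getName());
            // rename is atomic within an HDFS namespace: the file appears in
            // the store whole, or not at all
            if (!fs.rename(tmpFile, dst)) {
                throw new IOException("failed to commit " + tmpFile + " as " + dst);
            }
            return dst;
        }
    }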
2024-11-27T03:36:15,964 DEBUG [M:0;66d3634da959:36843 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678575836Disabling compacts and flushes for region at 1732678575836Disabling writes for close at 1732678575836Obtaining lock to block concurrent updates at 1732678575836Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678575836Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44581, getHeapSize=56144, getOffHeapSize=0, getCellsCount=140 at 1732678575837 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678575838 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678575838Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678575853 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678575853Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678575862 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678575876 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678575876Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678575886 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678575899 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678575899Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678575909 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678575927 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678575927Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@53b80ae0: reopening flushed file at 1732678575938 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2277ef7a: reopening flushed file at 1732678575944 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45c8339d: reopening flushed file at 1732678575949 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@661dfb40: reopening flushed file at 1732678575955 (+6 ms)Finished flush of dataSize ~43.54 KB/44581, heapSize ~54.83 KB/56144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 126ms, sequenceid=121, compaction requested=false at 1732678575962 (+7 ms)Writing region close event to WAL at 1732678575963 (+1 ms)Closed at 1732678575963 2024-11-27T03:36:15,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,964 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,964 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,964 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,964 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34045 is added to blk_1073741830_1006 (size=52978) 2024-11-27T03:36:15,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37469 is added to blk_1073741830_1006 (size=52978) 2024-11-27T03:36:15,967 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-27T03:36:15,967 INFO [M:0;66d3634da959:36843 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-27T03:36:15,967 INFO [M:0;66d3634da959:36843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36843 2024-11-27T03:36:15,967 INFO [M:0;66d3634da959:36843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:36:16,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:36:16,074 INFO [M:0;66d3634da959:36843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:36:16,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36843-0x1017a5c163a0000, quorum=127.0.0.1:55892, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:36:16,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@65c292b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:16,081 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6719db8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:36:16,082 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:36:16,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@317cfbff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:36:16,082 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74e6f5d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir/,STOPPED} 2024-11-27T03:36:16,085 WARN [BP-1891682051-172.17.0.3-1732678521368 heartbeating to localhost/127.0.0.1:43791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:36:16,085 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:36:16,085 WARN [BP-1891682051-172.17.0.3-1732678521368 heartbeating to localhost/127.0.0.1:43791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891682051-172.17.0.3-1732678521368 (Datanode Uuid c0e39c88-aa42-4f3c-ad95-b0ce9c5ef496) service to localhost/127.0.0.1:43791 2024-11-27T03:36:16,085 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:36:16,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data3/current/BP-1891682051-172.17.0.3-1732678521368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:16,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data4/current/BP-1891682051-172.17.0.3-1732678521368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:16,087 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:36:16,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74d20704{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:16,089 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54e83224{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:36:16,089 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:36:16,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc731d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:36:16,089 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b135886{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir/,STOPPED} 2024-11-27T03:36:16,091 WARN [BP-1891682051-172.17.0.3-1732678521368 heartbeating to localhost/127.0.0.1:43791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:36:16,091 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
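The refreshUsed warnings above come from datanode background threads that periodically recompute cached disk usage and exit when interrupted during shutdown; the "sleep interrupted" message is the normal exit path, not a failure. A self-contained version of that loop (interval and body are illustrative):

    final class RefreshThread extends Thread {
        private volatile long cachedUsed;

        @Override
        public void run() {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    Thread.sleep(60_000);       // refresh interval
                    cachedUsed = computeUsed(); // e.g. walk a data dir and sum sizes
                } catch (InterruptedException e) {
                    // shutdown path: matches "Thread Interrupted waiting to
                    // refresh disk information: sleep interrupted"
                    break;
                }
            }
        }

        private long computeUsed() { return 0L; } // placeholder
    }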
2024-11-27T03:36:16,091 WARN [BP-1891682051-172.17.0.3-1732678521368 heartbeating to localhost/127.0.0.1:43791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891682051-172.17.0.3-1732678521368 (Datanode Uuid ac2b6ff5-d558-442a-82bb-5f4f0014dcc3) service to localhost/127.0.0.1:43791 2024-11-27T03:36:16,091 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:36:16,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data1/current/BP-1891682051-172.17.0.3-1732678521368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:16,092 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/cluster_69b5bd1b-f1ba-2599-76ac-2ed3d406146d/data/data2/current/BP-1891682051-172.17.0.3-1732678521368 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:16,092 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:36:16,098 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ec454b0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:36:16,099 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fdc15a6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:36:16,099 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:36:16,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ea88c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:36:16,099 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1106c0e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir/,STOPPED} 2024-11-27T03:36:16,104 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-27T03:36:16,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-27T03:36:16,128 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 183) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: regionserver/66d3634da959:0.leaseChecker
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82)

Potentially hanging thread: nioEventLoopGroup-37-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-34-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-37-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-36-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43791
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-12-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43791
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43791 from jenkins.hfs.5
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-35-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-36-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43791
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-35-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:43791
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-34-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK?
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 77), ProcessCount=11 (was 11), AvailableMemoryMB=7816 (was 7851) 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=61, ProcessCount=11, AvailableMemoryMB=7816 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.log.dir so I do NOT create it in target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3fe20ab7-c50b-9027-50f0-4c11bc18f4dc/hadoop.tmp.dir so I do NOT create it in target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf, deleteOnExit=true 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/test.cache.data in system properties and HBase conf 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.tmp.dir in system properties and HBase conf 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir in system properties and HBase conf 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-27T03:36:16,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-27T03:36:16,135 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/nfs.dump.dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/java.io.tmpdir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:36:16,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-27T03:36:16,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-27T03:36:16,150 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:36:16,233 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:36:16,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:16,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:16,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:16,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:16,659 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:16,662 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:36:16,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:36:16,663 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:36:16,663 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:36:16,664 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:16,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@622d58de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:36:16,665 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7305dd28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:36:16,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@132e4184{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/java.io.tmpdir/jetty-localhost-38619-hadoop-hdfs-3_4_1-tests_jar-_-any-11828139113540990285/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:36:16,758 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@53298b3d{HTTP/1.1, (http/1.1)}{localhost:38619} 2024-11-27T03:36:16,758 INFO [Time-limited test {}] server.Server(415): Started @257802ms 2024-11-27T03:36:16,769 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:36:17,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:17,061 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:36:17,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:36:17,062 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:36:17,062 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:36:17,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31e425dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:36:17,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3523e770{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:36:17,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c9ba3de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/java.io.tmpdir/jetty-localhost-41037-hadoop-hdfs-3_4_1-tests_jar-_-any-15203291963389890141/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:17,155 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70d55230{HTTP/1.1, (http/1.1)}{localhost:41037} 2024-11-27T03:36:17,155 INFO [Time-limited test {}] server.Server(415): Started @258199ms 2024-11-27T03:36:17,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:36:17,180 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:17,182 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:36:17,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:36:17,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:36:17,183 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:36:17,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@216b0c63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:36:17,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc4994c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:36:17,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2998c4ad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/java.io.tmpdir/jetty-localhost-45027-hadoop-hdfs-3_4_1-tests_jar-_-any-14492887921563426702/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:17,274 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9caeb33{HTTP/1.1, (http/1.1)}{localhost:45027} 2024-11-27T03:36:17,274 INFO [Time-limited test {}] server.Server(415): Started @258318ms 2024-11-27T03:36:17,275 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:36:17,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:17,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:17,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:17,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:18,302 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data1/current/BP-216229593-172.17.0.3-1732678576154/current, will proceed with Du for space computation calculation, 2024-11-27T03:36:18,302 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data2/current/BP-216229593-172.17.0.3-1732678576154/current, will proceed with Du for space computation calculation, 2024-11-27T03:36:18,321 WARN [Thread-1942 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-27T03:36:18,322 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e5b08aaf6773412 with lease ID 0xf9f3ad27037adcab: Processing first storage report for DS-47aee4b2-af11-46d3-9e29-7c3be6f4bfa3 from datanode DatanodeRegistration(127.0.0.1:35741, datanodeUuid=dbba5afb-11a3-4dbd-b8ec-f6de5bafee9d, infoPort=43059, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154) 2024-11-27T03:36:18,323 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e5b08aaf6773412 with lease ID 0xf9f3ad27037adcab: from storage DS-47aee4b2-af11-46d3-9e29-7c3be6f4bfa3 node DatanodeRegistration(127.0.0.1:35741, datanodeUuid=dbba5afb-11a3-4dbd-b8ec-f6de5bafee9d, infoPort=43059, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:36:18,323 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2e5b08aaf6773412 with lease ID 0xf9f3ad27037adcab: Processing first storage report for DS-e25acd8d-0918-45d6-8229-0bfc70afef5d from datanode DatanodeRegistration(127.0.0.1:35741, datanodeUuid=dbba5afb-11a3-4dbd-b8ec-f6de5bafee9d, infoPort=43059, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154) 2024-11-27T03:36:18,323 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2e5b08aaf6773412 with lease ID 0xf9f3ad27037adcab: from storage DS-e25acd8d-0918-45d6-8229-0bfc70afef5d node DatanodeRegistration(127.0.0.1:35741, datanodeUuid=dbba5afb-11a3-4dbd-b8ec-f6de5bafee9d, infoPort=43059, infoSecurePort=0, ipcPort=45255, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:36:18,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:18,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:18,423 WARN [Thread-1989 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data3/current/BP-216229593-172.17.0.3-1732678576154/current, will proceed with Du for space computation calculation, 2024-11-27T03:36:18,423 WARN [Thread-1990 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data4/current/BP-216229593-172.17.0.3-1732678576154/current, will proceed with Du for space computation calculation, 2024-11-27T03:36:18,441 WARN [Thread-1965 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-27T03:36:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde1aa6ec1e6199b2 with lease ID 0xf9f3ad27037adcac: Processing first storage report for DS-ab67aada-8083-4ba8-b1b7-873e0df0df6b from datanode DatanodeRegistration(127.0.0.1:43267, datanodeUuid=404add00-c613-4013-8a55-fbb048b0c417, infoPort=46189, infoSecurePort=0, ipcPort=42527, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154) 2024-11-27T03:36:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde1aa6ec1e6199b2 with lease ID 0xf9f3ad27037adcac: from storage DS-ab67aada-8083-4ba8-b1b7-873e0df0df6b node DatanodeRegistration(127.0.0.1:43267, datanodeUuid=404add00-c613-4013-8a55-fbb048b0c417, infoPort=46189, infoSecurePort=0, ipcPort=42527, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:36:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xde1aa6ec1e6199b2 with lease ID 0xf9f3ad27037adcac: Processing first storage report for DS-e5242aa2-ddb8-4d59-ab33-3e8d69848a41 from datanode DatanodeRegistration(127.0.0.1:43267, datanodeUuid=404add00-c613-4013-8a55-fbb048b0c417, infoPort=46189, infoSecurePort=0, ipcPort=42527, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154) 2024-11-27T03:36:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xde1aa6ec1e6199b2 with lease ID 0xf9f3ad27037adcac: from storage DS-e5242aa2-ddb8-4d59-ab33-3e8d69848a41 node DatanodeRegistration(127.0.0.1:43267, datanodeUuid=404add00-c613-4013-8a55-fbb048b0c417, infoPort=46189, infoSecurePort=0, ipcPort=42527, storageInfo=lv=-57;cid=testClusterID;nsid=1537283304;c=1732678576154), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-27T03:36:18,504 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14 2024-11-27T03:36:18,510 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/zookeeper_0, clientPort=56638, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-27T03:36:18,512 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56638 2024-11-27T03:36:18,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:36:18,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:36:18,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:36:18,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741825_1001 (size=7) 2024-11-27T03:36:18,528 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42 with version=8 2024-11-27T03:36:18,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging 2024-11-27T03:36:18,531 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45 2024-11-27T03:36:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:36:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-27T03:36:18,531 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-27T03:36:18,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-27T03:36:18,532 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-27T03:36:18,532 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-27T03:36:18,532 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-27T03:36:18,533 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38227 2024-11-27T03:36:18,535 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38227 connecting to ZooKeeper ensemble=127.0.0.1:56638 2024-11-27T03:36:18,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:18,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:382270x0, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-27T03:36:18,596 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38227-0x1017a5cecf70000 connected 2024-11-27T03:36:18,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:18,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:36:18,684 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:36:18,687 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:36:18,687 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42, hbase.cluster.distributed=false 2024-11-27T03:36:18,690 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-27T03:36:18,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38227 2024-11-27T03:36:18,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38227 2024-11-27T03:36:18,691 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38227 2024-11-27T03:36:18,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38227 2024-11-27T03:36:18,692 DEBUG [Time-limited test {}] 
2024-11-27T03:36:18,692 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38227
2024-11-27T03:36:18,710 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-27T03:36:18,710 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:36:18,711 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34443
2024-11-27T03:36:18,712 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34443 connecting to ZooKeeper ensemble=127.0.0.1:56638
2024-11-27T03:36:18,713 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:18,714 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:18,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:344430x0, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:36:18,728 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:344430x0, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:36:18,728 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34443-0x1017a5cecf70001 connected
2024-11-27T03:36:18,729 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-27T03:36:18,729 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-27T03:36:18,730 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
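The RpcExecutor lines above describe the server's call queues: a bounded java.util.concurrent.LinkedBlockingQueue per queue (maxQueueLength=30) drained by a fixed number of named handler threads (handlerCount). A schematic sketch of that pattern, not HBase's actual RpcExecutor code:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class HandlerPoolSketch {
  public static void main(String[] args) throws Exception {
    BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30); // maxQueueLength=30
    int handlerCount = 3;
    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run(); // block until an RPC "call" is queued, then dispatch it
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // shut down on interrupt
        }
      }, "default.FPBQ.Fifo.handler=" + i); // threadPrefix from the log
      handler.setDaemon(true);
      handler.start();
    }
    // offer() fails fast when the bounded queue is full; in a real server a full
    // call queue is what gets reported back to clients as queue-too-big rejection.
    boolean accepted = callQueue.offer(() -> System.out.println("handled one call"));
    System.out.println("accepted=" + accepted);
    Thread.sleep(200); // give a handler a moment to drain the call
  }
}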
2024-11-27T03:36:18,732 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:36:18,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34443
2024-11-27T03:36:18,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34443
2024-11-27T03:36:18,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34443
2024-11-27T03:36:18,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34443
2024-11-27T03:36:18,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34443
2024-11-27T03:36:18,747 DEBUG [M:0;66d3634da959:38227 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:38227
2024-11-27T03:36:18,747 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,38227,1732678578531
2024-11-27T03:36:18,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:18,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:18,760 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,38227,1732678578531
2024-11-27T03:36:18,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-27T03:36:18,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,771 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-27T03:36:18,772 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,38227,1732678578531 from backup master directory
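The ZKUtil/ZKWatcher traffic here follows a standard ZooKeeper idiom: calling exists() with a watcher both checks the znode and registers a one-shot watch even when the znode is absent ("Set watcher on znode that does not yet exist"), so a later create of /hbase/master or /hbase/running comes back as a NodeCreated event. A self-contained sketch against the logged quorum address (the session timeout is an assumption):

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56638", 30_000, watcher);
    // Returns null if /hbase/running is absent, but the watch is still registered,
    // so a later create() by the master fires a NodeCreated event on this session.
    System.out.println("stat=" + zk.exists("/hbase/running", watcher));
    Thread.sleep(10_000); // keep the session alive long enough to observe events
    zk.close();
  }
}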
2024-11-27T03:36:18,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,38227,1732678578531
2024-11-27T03:36:18,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:18,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:18,784 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:36:18,784 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,38227,1732678578531
2024-11-27T03:36:18,789 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/hbase.id] with ID: 297bf68e-4a3c-441d-832a-afc5e24f7963
2024-11-27T03:36:18,789 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/.tmp/hbase.id
2024-11-27T03:36:18,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:36:18,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:36:18,795 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/.tmp/hbase.id]:[hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/hbase.id]
2024-11-27T03:36:18,806 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:18,806 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-27T03:36:18,807 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
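The FSUtils lines show the usual safe-publish sequence for hbase.id: write the file under .tmp first, then move it to its final name, so a concurrent reader never observes a half-written cluster ID. A minimal sketch of the same two-step pattern with shortened, assumed paths:

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44999"), new Configuration());
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // temporary location
    Path dst = new Path("/user/jenkins/test-data/hbase.id");      // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("297bf68e-4a3c-441d-832a-afc5e24f7963".getBytes(StandardCharsets.UTF_8));
    }
    // An HDFS rename within one filesystem is atomic, so the ID file appears all at once.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("failed to move " + tmp + " to " + dst);
    }
  }
}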
2024-11-27T03:36:18,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:36:18,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:36:18,822 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:36:18,823 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-27T03:36:18,823 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:36:18,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:36:18,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:36:18,835 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store
2024-11-27T03:36:18,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:36:18,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:36:18,841 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:18,841 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:36:18,842 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:18,842 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:18,842 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:36:18,842 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:18,842 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
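The descriptor dumps above are easier to read as client-API calls. An illustrative reconstruction of two of the four families using the public HBase builder API (this is not how MasterRegion builds the descriptor internally, and 'rs' and 'state' would follow the same shape as 'proc'):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                    // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
            .setBlocksize(65536)                                  // BLOCKSIZE => 64 KB
            .build())
        .build();
  }
}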
2024-11-27T03:36:18,842 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678578841Disabling compacts and flushes for region at 1732678578841Disabling writes for close at 1732678578842 (+1 ms)Writing region close event to WAL at 1732678578842Closed at 1732678578842
2024-11-27T03:36:18,843 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/.initializing
2024-11-27T03:36:18,843 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/WALs/66d3634da959,38227,1732678578531
2024-11-27T03:36:18,845 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C38227%2C1732678578531, suffix=, logDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/WALs/66d3634da959,38227,1732678578531, archiveDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/oldWALs, maxLogs=10
2024-11-27T03:36:18,846 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38227%2C1732678578531.1732678578845
2024-11-27T03:36:18,850 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/WALs/66d3634da959,38227,1732678578531/66d3634da959%2C38227%2C1732678578531.1732678578845
2024-11-27T03:36:18,851 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:46189:46189)]
2024-11-27T03:36:18,851 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:36:18,852 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:18,852 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,852 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-27T03:36:18,854 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:18,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:36:18,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-27T03:36:18,856 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:18,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
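Each CompactionConfiguration line repeats the same knobs: ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10. The core rule shared by HBase's ratio-based policies is: walk the candidate files oldest-first, and skip a file while it is bigger than ratio times the total size of the files after it; whatever remains (if at least minFilesToCompact files) is the minor-compaction set. A simplified sketch of just that rule (ExploringCompactionPolicy layers a smarter window search on top):

import java.util.ArrayList;
import java.util.List;

public class RatioRuleSketch {
  // sizes are ordered oldest-first, which for a well-behaved store means largest-first.
  static List<Long> select(List<Long> sizes, double ratio, int minFiles) {
    int start = 0;
    while (start < sizes.size()) {
      long sumAfter = 0;
      for (int j = start + 1; j < sizes.size(); j++) sumAfter += sizes.get(j);
      // Keep skipping while this file is "too big" relative to the rest.
      if (sizes.get(start) <= ratio * sumAfter) break;
      start++;
    }
    List<Long> picked = new ArrayList<>(sizes.subList(start, sizes.size()));
    return picked.size() >= minFiles ? picked : List.of();
  }

  public static void main(String[] args) {
    // 100 is excluded (100 > 1.2 * (12+8+6)); the three smaller files compact together.
    System.out.println(select(List.of(100L, 12L, 8L, 6L), 1.2, 3)); // [12, 8, 6]
  }
}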
2024-11-27T03:36:18,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:36:18,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:18,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:36:18,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:36:18,860 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:18,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:36:18,860 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,861 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,861 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,863 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,863 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,863 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:36:18,864 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:18,869 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:36:18,869 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808685, jitterRate=0.02829669415950775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:36:18,870 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678578852Initializing all the Stores at 1732678578853 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678578853Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678578853Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678578853Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678578853Cleaning up temporary data from old regions at 1732678578863 (+10 ms)Region opened successfully at 1732678578870 (+7 ms)
2024-11-27T03:36:18,870 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
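The "Opened ..." line encodes the split policy's inputs. desiredMaxFileSize=808685 is a jittered cap: with an apparently configured base of 786432 bytes, 786432 x (1 + 0.02829669...) = 808685. In IncreasingToUpperBoundRegionSplitPolicy the size that actually triggers a split grows with the cube of the table's region count on the server, bounded by that cap; a hedged sketch of the check as implemented in recent HBase releases:

public class SplitSizeSketch {
  static long sizeToCheck(long initialSize, long desiredMaxFileSize, int regionCount) {
    if (regionCount == 0) {
      return desiredMaxFileSize; // no local region count yet: fall back to the cap
    }
    // initialSize * regionCount^3, capped at the (jittered) desiredMaxFileSize
    return Math.min(desiredMaxFileSize,
        initialSize * regionCount * regionCount * regionCount);
  }

  public static void main(String[] args) {
    // Values from the log line: initialSize=268435456, desiredMaxFileSize=808685.
    // With one region the cube term is 256 MB, so the tiny jittered cap wins.
    System.out.println(sizeToCheck(268_435_456L, 808_685L, 1)); // 808685
  }
}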
2024-11-27T03:36:18,873 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@174b1d21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:36:18,874 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:36:18,874 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:36:18,874 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:36:18,874 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:36:18,874 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-27T03:36:18,875 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-27T03:36:18,875 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:36:18,877 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:36:18,878 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:36:18,889 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:36:18,890 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:36:18,890 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:36:18,900 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:36:18,900 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:36:18,901 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:36:18,910 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:36:18,911 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:36:18,921 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:36:18,923 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:36:18,931 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:36:18,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:36:18,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:36:18,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,943 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,38227,1732678578531, sessionid=0x1017a5cecf70000, setting cluster-up flag (Was=false)
2024-11-27T03:36:18,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:18,995 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:36:18,999 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,38227,1732678578531
2024-11-27T03:36:19,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:19,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:19,058 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:36:19,062 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,38227,1732678578531
2024-11-27T03:36:19,065 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-27T03:36:19,069 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-27T03:36:19,070 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-27T03:36:19,070 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-27T03:36:19,071 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,38227,1732678578531 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
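The StochasticLoadBalancer line lists its cost functions and notes that the sum of their multipliers is 0.0, which is why there is nothing to optimize on an empty cluster. Schematically (this is not the real implementation, and the exact normalization differs between versions), the balancer scores a candidate cluster state as a multiplier-weighted combination of per-function costs scaled to [0,1]:

public class WeightedCostSketch {
  record WeightedCost(double multiplier, double cost) {} // cost assumed normalized to [0,1]

  static double totalCost(WeightedCost[] functions) {
    double weighted = 0, sumMultipliers = 0;
    for (WeightedCost f : functions) {
      if (f.multiplier() <= 0) continue; // disabled functions contribute nothing
      weighted += f.multiplier() * f.cost();
      sumMultipliers += f.multiplier();
    }
    return sumMultipliers == 0 ? 0 : weighted / sumMultipliers;
  }

  public static void main(String[] args) {
    // On an empty cluster every multiplier is reported as 0.0, so the score is 0.
    System.out.println(totalCost(new WeightedCost[] {
        new WeightedCost(0.0, 1.0),    // e.g. a region-count skew term, disabled
        new WeightedCost(0.0, 0.5)})); // e.g. a move-cost term, disabled
  }
}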
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2
2024-11-27T03:36:19,074 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1
2024-11-27T03:36:19,075 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678609075
2024-11-27T03:36:19,075 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-27T03:36:19,075 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-27T03:36:19,076 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-27T03:36:19,076 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-27T03:36:19,076 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-27T03:36:19,076 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-27T03:36:19,076 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:36:19,076 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-27T03:36:19,076 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-27T03:36:19,077 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-27T03:36:19,077 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-27T03:36:19,077 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-27T03:36:19,077 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-27T03:36:19,077 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-27T03:36:19,077 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678579077,5,FailOnTimeoutGroup]
2024-11-27T03:36:19,077 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:19,077 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-27T03:36:19,078 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678579077,5,FailOnTimeoutGroup]
2024-11-27T03:36:19,078 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-27T03:36:19,078 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-27T03:36:19,078 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-27T03:36:19,078 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
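The ChoreService lines register periodic background chores: LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms. A minimal JDK-only analogue of that scheduling pattern (HBase's own ChoreService adds missed-start tracking and cancellation on top, and the cleaner bodies below are placeholders):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    chores.scheduleAtFixedRate(
        () -> System.out.println("LogsCleaner: deleting expired WALs from oldWALs"),
        0, 600_000, TimeUnit.MILLISECONDS);   // period=600000, unit=MILLISECONDS
    chores.scheduleAtFixedRate(
        () -> System.out.println("SnapshotCleaner: removing expired snapshots"),
        0, 1_800_000, TimeUnit.MILLISECONDS); // period=1800000
    // The pool threads are non-daemon, so the JVM stays up and keeps running chores
    // until chores.shutdown() is called.
  }
}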
2024-11-27T03:36:19,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:36:19,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741831_1007 (size=1321)
2024-11-27T03:36:19,084 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-27T03:36:19,084 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42
2024-11-27T03:36:19,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:36:19,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741832_1008 (size=32)
2024-11-27T03:36:19,090 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:19,091 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-27T03:36:19,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-27T03:36:19,093 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:19,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:36:19,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-27T03:36:19,095 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-27T03:36:19,095 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:19,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:36:19,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-27T03:36:19,096 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-27T03:36:19,097 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:19,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:36:19,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-27T03:36:19,098 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-27T03:36:19,098 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:19,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:36:19,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-27T03:36:19,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740
2024-11-27T03:36:19,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740
2024-11-27T03:36:19,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-27T03:36:19,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-27T03:36:19,101 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
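The FlushLargeStoresPolicy lines state their own fallback arithmetic: without hbase.hregion.percolumnfamilyflush.size.lower.bound, the per-family flush lower bound is the region's memstore flush size divided by the number of families. For master:store that is 134217728 / 4 = 33554432 (32 MB, matching the flushSizeLowerBound logged earlier); the 16.0 M figure for hbase:meta implies an assumed 64 MB flush size over its four families. Checked in two lines:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long masterStoreFlushSize = 134_217_728L; // 128 MB region flush size, 4 families (info/proc/rs/state)
    long metaFlushSize = 67_108_864L;         // assumed 64 MB, 4 families (info/ns/rep_barrier/table)
    System.out.println(masterStoreFlushSize / 4); // 33554432 -> logged as "32.0 M"
    System.out.println(metaFlushSize / 4);        // 16777216 -> logged as "16.0 M"
  }
}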
2024-11-27T03:36:19,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-27T03:36:19,103 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T03:36:19,104 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746620, jitterRate=-0.05062377452850342}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T03:36:19,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678579090Initializing all the Stores at 1732678579091 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678579091Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678579091Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678579091Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678579091Cleaning up temporary data from old regions at 1732678579100 (+9 ms)Region opened successfully at 1732678579104 (+4 ms) 2024-11-27T03:36:19,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-27T03:36:19,104 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-27T03:36:19,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-27T03:36:19,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T03:36:19,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T03:36:19,105 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:36:19,105 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678579104Disabling compacts and flushes for region at 1732678579104Disabling writes for close at 1732678579104Writing region close 
event to WAL at 1732678579105 (+1 ms)Closed at 1732678579105 2024-11-27T03:36:19,106 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-27T03:36:19,106 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-27T03:36:19,106 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-27T03:36:19,107 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-27T03:36:19,108 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-27T03:36:19,136 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(746): ClusterId : 297bf68e-4a3c-441d-832a-afc5e24f7963 2024-11-27T03:36:19,136 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-27T03:36:19,150 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-27T03:36:19,150 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-27T03:36:19,166 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-27T03:36:19,166 DEBUG [RS:0;66d3634da959:34443 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ab3d74d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0 2024-11-27T03:36:19,183 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:34443 2024-11-27T03:36:19,183 INFO [RS:0;66d3634da959:34443 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-27T03:36:19,183 INFO [RS:0;66d3634da959:34443 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-27T03:36:19,183 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-27T03:36:19,183 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,38227,1732678578531 with port=34443, startcode=1732678578710 2024-11-27T03:36:19,184 DEBUG [RS:0;66d3634da959:34443 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-27T03:36:19,185 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55057, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-27T03:36:19,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38227 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,34443,1732678578710 2024-11-27T03:36:19,186 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38227 {}] master.ServerManager(517): Registering regionserver=66d3634da959,34443,1732678578710 2024-11-27T03:36:19,187 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42 2024-11-27T03:36:19,187 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44999 2024-11-27T03:36:19,187 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-27T03:36:19,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:36:19,195 DEBUG [RS:0;66d3634da959:34443 {}] zookeeper.ZKUtil(111): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,34443,1732678578710 2024-11-27T03:36:19,195 WARN [RS:0;66d3634da959:34443 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-27T03:36:19,196 INFO [RS:0;66d3634da959:34443 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:36:19,196 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710 2024-11-27T03:36:19,196 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,34443,1732678578710] 2024-11-27T03:36:19,199 INFO [RS:0;66d3634da959:34443 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-27T03:36:19,200 INFO [RS:0;66d3634da959:34443 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-27T03:36:19,201 INFO [RS:0;66d3634da959:34443 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-27T03:36:19,201 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
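[Annotation] The MemStoreFlusher and PressureAwareCompactionThroughputController records above report the derived global memstore limits (880 M upper, 836 M lower mark on this test JVM's heap) and the compaction throughput bounds. A hedged sketch of the settings that feed those numbers; the two throughput-bound keys are standard, while the tune-period key name is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ThroughputSketch {
      public static Configuration conf() {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap usable by all memstores; the 880 M / 836 M pair in the
        // log is this limit and its lower mark resolved against the test heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Bounds used by PressureAwareCompactionThroughputController (values mirror the log).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Assumed key name for the "tuning period: 60000 ms" the controller logs.
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60000);
        return conf;
      }
    }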
2024-11-27T03:36:19,201 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-27T03:36:19,201 INFO [RS:0;66d3634da959:34443 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-27T03:36:19,202 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3 2024-11-27T03:36:19,202 DEBUG [RS:0;66d3634da959:34443 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3 2024-11-27T03:36:19,203 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
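[Annotation] The ChoreService records here and below (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, ...) all follow one pattern: a ScheduledChore with a name and a period, scheduled on the server's ChoreService. A minimal sketch of that pattern using the real ChoreService/ScheduledChore API, with a made-up chore name:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void schedule(Stoppable stopper) {
        // Runs chore() every 1000 ms until the stopper is stopped, mirroring
        // entries like "name=CompactionChecker, period=1000, unit=MILLISECONDS".
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override
          protected void chore() {
            // periodic work goes here
          }
        };
        new ChoreService("example").scheduleChore(chore);
      }
    }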
2024-11-27T03:36:19,203 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,203 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,203 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,203 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,203 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,34443,1732678578710-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T03:36:19,217 INFO [RS:0;66d3634da959:34443 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-27T03:36:19,217 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,34443,1732678578710-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,217 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,217 INFO [RS:0;66d3634da959:34443 {}] regionserver.Replication(171): 66d3634da959,34443,1732678578710 started 2024-11-27T03:36:19,230 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,230 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,34443,1732678578710, RpcServer on 66d3634da959/172.17.0.3:34443, sessionid=0x1017a5cecf70001 2024-11-27T03:36:19,230 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-27T03:36:19,230 DEBUG [RS:0;66d3634da959:34443 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,34443,1732678578710 2024-11-27T03:36:19,230 DEBUG [RS:0;66d3634da959:34443 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,34443,1732678578710' 2024-11-27T03:36:19,230 DEBUG [RS:0;66d3634da959:34443 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-27T03:36:19,231 DEBUG [RS:0;66d3634da959:34443 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-27T03:36:19,231 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-27T03:36:19,231 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-27T03:36:19,231 DEBUG [RS:0;66d3634da959:34443 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,34443,1732678578710 2024-11-27T03:36:19,231 DEBUG [RS:0;66d3634da959:34443 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,34443,1732678578710' 2024-11-27T03:36:19,231 DEBUG [RS:0;66d3634da959:34443 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-27T03:36:19,232 DEBUG 
[RS:0;66d3634da959:34443 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-27T03:36:19,232 DEBUG [RS:0;66d3634da959:34443 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-27T03:36:19,232 INFO [RS:0;66d3634da959:34443 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-27T03:36:19,232 INFO [RS:0;66d3634da959:34443 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-27T03:36:19,258 WARN [66d3634da959:38227 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-27T03:36:19,334 INFO [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C34443%2C1732678578710, suffix=, logDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710, archiveDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/oldWALs, maxLogs=32 2024-11-27T03:36:19,335 INFO [RS:0;66d3634da959:34443 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34443%2C1732678578710.1732678579335 2024-11-27T03:36:19,344 INFO [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678579335 2024-11-27T03:36:19,345 DEBUG [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:46189:46189)] 2024-11-27T03:36:19,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:19,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:19,509 DEBUG [66d3634da959:38227 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-27T03:36:19,510 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:19,513 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,34443,1732678578710, state=OPENING 2024-11-27T03:36:19,521 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-27T03:36:19,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:36:19,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:36:19,533 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-27T03:36:19,533 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:36:19,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,34443,1732678578710}] 2024-11-27T03:36:19,533 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:36:19,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:19,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:19,688 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-27T03:36:19,691 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52247, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-27T03:36:19,698 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-27T03:36:19,699 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:36:19,701 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C34443%2C1732678578710.meta, suffix=.meta, logDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710, archiveDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/oldWALs, maxLogs=32 2024-11-27T03:36:19,702 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34443%2C1732678578710.meta.1732678579702.meta 2024-11-27T03:36:19,707 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.meta.1732678579702.meta 2024-11-27T03:36:19,709 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46189:46189),(127.0.0.1/127.0.0.1:43059:43059)] 2024-11-27T03:36:19,711 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-27T03:36:19,712 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-27T03:36:19,712 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-27T03:36:19,712 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
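[Annotation] The RegionCoprocessorHost record above shows MultiRowMutationEndpoint being loaded from hbase:meta's table descriptor (HBase attaches it to meta automatically). For a user table, a coprocessor is declared the same way, on the descriptor; a minimal sketch with a hypothetical table name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorSketch {
      public static TableDescriptor descriptor() throws Exception {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))                // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }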
2024-11-27T03:36:19,712 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-27T03:36:19,712 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:36:19,712 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-27T03:36:19,712 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-27T03:36:19,713 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T03:36:19,714 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T03:36:19,714 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:19,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:36:19,715 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-27T03:36:19,715 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-27T03:36:19,715 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:19,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:36:19,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T03:36:19,716 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T03:36:19,716 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:19,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:36:19,717 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T03:36:19,717 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T03:36:19,717 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:19,718 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
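[Annotation] Each store open above logs "instantiating StoreFileTracker impl ... DefaultStoreFileTracker". The implementation is selected by the 'hbase.store.file-tracker.impl' setting (the same key that appears in this log's table descriptor further down, set to 'DEFAULT'). A minimal sketch of overriding it cluster-wide; FILE keeps the store-file list in a tracker file instead of relying on rename, which suits filesystems without atomic rename:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TrackerSketch {
      public static Configuration withFileTracker() {
        Configuration conf = HBaseConfiguration.create();
        // DEFAULT is what this log shows; FILE is the file-based tracker.
        conf.set("hbase.store.file-tracker.impl", "FILE");
        return conf;
      }
    }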
2024-11-27T03:36:19,718 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-27T03:36:19,718 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740 2024-11-27T03:36:19,719 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740 2024-11-27T03:36:19,720 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-27T03:36:19,720 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-27T03:36:19,721 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T03:36:19,721 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-27T03:36:19,722 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837200, jitterRate=0.06455597281455994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T03:36:19,722 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-27T03:36:19,723 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732678579712Writing region info on filesystem at 1732678579712Initializing all the Stores at 1732678579713 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678579713Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678579713Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678579713Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678579713Cleaning up temporary data from old regions at 1732678579720 (+7 ms)Running coprocessor post-open hooks at 1732678579722 (+2 ms)Region opened successfully at 1732678579723 (+1 ms) 2024-11-27T03:36:19,723 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678579687 2024-11-27T03:36:19,725 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-27T03:36:19,725 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-27T03:36:19,726 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:19,727 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,34443,1732678578710, state=OPEN 2024-11-27T03:36:19,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T03:36:19,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T03:36:19,764 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:36:19,764 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:36:19,764 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,34443,1732678578710 2024-11-27T03:36:19,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-27T03:36:19,768 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,34443,1732678578710 in 232 msec 2024-11-27T03:36:19,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-27T03:36:19,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 662 msec 2024-11-27T03:36:19,773 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-27T03:36:19,773 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-27T03:36:19,775 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-27T03:36:19,775 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,34443,1732678578710, seqNum=-1] 2024-11-27T03:36:19,775 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T03:36:19,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55847, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T03:36:19,785 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 716 msec 2024-11-27T03:36:19,785 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678579785, completionTime=-1 2024-11-27T03:36:19,785 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-27T03:36:19,785 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678639788 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678699788 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38227,1732678578531-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38227,1732678578531-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38227,1732678578531-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,788 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:38227, period=300000, unit=MILLISECONDS is enabled. 
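[Annotation] InitMetaProcedure above bootstraps the built-in 'default' and 'hbase' namespaces. User namespaces go through the Admin API; a minimal sketch (connection configuration passed in, namespace name hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void run(Configuration conf) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build()); // hypothetical
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // prints default, hbase, example_ns
          }
        }
      }
    }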
2024-11-27T03:36:19,789 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,789 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-27T03:36:19,791 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.009sec 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-27T03:36:19,794 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38227,1732678578531-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T03:36:19,795 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38227,1732678578531-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-27T03:36:19,797 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-27T03:36:19,797 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-27T03:36:19,797 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38227,1732678578531-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
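[Annotation] At this point the master reports initialization complete (1.009 sec) and the remaining records come from the test driver connecting as a client. The surrounding lifecycle is the HBaseTestingUtil mini cluster named just below ("Minicluster is up"); a hedged sketch of that lifecycle, assuming the 3.x HBaseTestingUtil API seen in these records:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(1);      // in-process HDFS + ZooKeeper + master + one region server
        try {
          // ... exercise the cluster (here: the TestLogRolling scenarios) ...
        } finally {
          util.shutdownMiniCluster();  // produces the teardown records at the end of such logs
        }
      }
    }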
2024-11-27T03:36:19,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6182ce6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:36:19,836 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,38227,-1 for getting cluster id
2024-11-27T03:36:19,837 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-27T03:36:19,839 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '297bf68e-4a3c-441d-832a-afc5e24f7963'
2024-11-27T03:36:19,839 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-27T03:36:19,839 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "297bf68e-4a3c-441d-832a-afc5e24f7963"
2024-11-27T03:36:19,840 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5221f357, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:36:19,840 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,38227,-1]
2024-11-27T03:36:19,840 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-27T03:36:19,841 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:19,842 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60404, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-27T03:36:19,844 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b6d286b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-27T03:36:19,844 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-27T03:36:19,846 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,34443,1732678578710, seqNum=-1]
2024-11-27T03:36:19,846 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-27T03:36:19,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-27T03:36:19,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=66d3634da959,38227,1732678578531
2024-11-27T03:36:19,851 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:19,854 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-27T03:36:19,854 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-27T03:36:19,856 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 66d3634da959,38227,1732678578531
2024-11-27T03:36:19,856 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7a2edca0
2024-11-27T03:36:19,856 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-27T03:36:19,857 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60412, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-27T03:36:19,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-27T03:36:19,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-11-27T03:36:19,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:36:19,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling
2024-11-27T03:36:19,861 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-11-27T03:36:19,861 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:19,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4
2024-11-27T03:36:19,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-27T03:36:19,863 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-27T03:36:19,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741835_1011 (size=381)
2024-11-27T03:36:19,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741835_1011 (size=381)
2024-11-27T03:36:19,872 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => df698933c24cf07640381536ae02d6b5, NAME => 'TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42
2024-11-27T03:36:19,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741836_1012 (size=64)
2024-11-27T03:36:19,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741836_1012 (size=64)
2024-11-27T03:36:19,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:19,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing df698933c24cf07640381536ae02d6b5, disabling compactions & flushes
2024-11-27T03:36:19,878 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:19,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:19,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. after waiting 0 ms
2024-11-27T03:36:19,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:19,878 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:19,878 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for df698933c24cf07640381536ae02d6b5: Waiting for close lock at 1732678579878Disabling compacts and flushes for region at 1732678579878Disabling writes for close at 1732678579878Writing region close event to WAL at 1732678579878Closed at 1732678579878
2024-11-27T03:36:19,880 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-11-27T03:36:19,880 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732678579880"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678579880"}]},"ts":"1732678579880"}
2024-11-27T03:36:19,882 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-11-27T03:36:19,883 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-27T03:36:19,884 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678579883"}]},"ts":"1732678579883"}
2024-11-27T03:36:19,885 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-11-27T03:36:19,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, ASSIGN}]
2024-11-27T03:36:19,887 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, ASSIGN
2024-11-27T03:36:19,888 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, ASSIGN; state=OFFLINE, location=66d3634da959,34443,1732678578710; forceNewPlan=false, retain=false
2024-11-27T03:36:20,039 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=df698933c24cf07640381536ae02d6b5, regionState=OPENING, regionLocation=66d3634da959,34443,1732678578710
2024-11-27T03:36:20,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, ASSIGN because future has completed
2024-11-27T03:36:20,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure df698933c24cf07640381536ae02d6b5, server=66d3634da959,34443,1732678578710}]
2024-11-27T03:36:20,206 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:20,207 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => df698933c24cf07640381536ae02d6b5, NAME => 'TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:36:20,207 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,207 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:20,208 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,208 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,209 INFO [StoreOpener-df698933c24cf07640381536ae02d6b5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,211 INFO [StoreOpener-df698933c24cf07640381536ae02d6b5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region df698933c24cf07640381536ae02d6b5 columnFamilyName info
2024-11-27T03:36:20,211 DEBUG [StoreOpener-df698933c24cf07640381536ae02d6b5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:20,212 INFO [StoreOpener-df698933c24cf07640381536ae02d6b5-1 {}] regionserver.HStore(327): Store=df698933c24cf07640381536ae02d6b5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:36:20,212 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,212 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,213 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,213 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,213 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,215 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,216 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:36:20,217 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened df698933c24cf07640381536ae02d6b5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821212, jitterRate=0.04422621428966522}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-27T03:36:20,217 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:20,217 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for df698933c24cf07640381536ae02d6b5: Running coprocessor pre-open hook at 1732678580208Writing region info on filesystem at 1732678580208Initializing all the Stores at 1732678580209 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678580209Cleaning up temporary data from old regions at 1732678580213 (+4 ms)Running coprocessor post-open hooks at 1732678580217 (+4 ms)Region opened successfully at 1732678580217
2024-11-27T03:36:20,218 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., pid=6, masterSystemTime=1732678580198
2024-11-27T03:36:20,220 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:20,220 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.
2024-11-27T03:36:20,221 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=df698933c24cf07640381536ae02d6b5, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,34443,1732678578710
2024-11-27T03:36:20,223 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure df698933c24cf07640381536ae02d6b5, server=66d3634da959,34443,1732678578710 because future has completed
2024-11-27T03:36:20,227 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-27T03:36:20,227 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure df698933c24cf07640381536ae02d6b5, server=66d3634da959,34443,1732678578710 in 181 msec
2024-11-27T03:36:20,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-27T03:36:20,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, ASSIGN in 341 msec
2024-11-27T03:36:20,230 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-27T03:36:20,231 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732678580230"}]},"ts":"1732678580230"}
2024-11-27T03:36:20,232 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-11-27T03:36:20,233 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-27T03:36:20,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 375 msec
2024-11-27T03:36:20,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:20,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:20,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:20,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-27T03:36:20,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:20,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,133 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-27T03:36:21,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:21,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
11 more 2024-11-27T03:36:24,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:24,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:25,199 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-27T03:36:25,201 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-27T03:36:25,230 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-27T03:36:25,230 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-27T03:36:25,232 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-27T03:36:25,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:25,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:25,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:25,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:26,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:26,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:26,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:26,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:27,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:27,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:27,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:27,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:28,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:28,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:28,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:28,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:29,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:29,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:29,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:29,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
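Note on the repeating warning above: the frames show that RecoverLeaseFSUtils invokes isFileClosed() through java.lang.reflect.Method, and the wrapped cause is IOException("Filesystem closed") from DFSClient.checkOpen(), i.e. the probe runs against a FileSystem that was already shut down, so it can never succeed and the retry loop keeps logging. The following is a minimal sketch of that reflective-probe pattern, not the HBase source; the class name and fallback behavior here are assumptions for illustration.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Returns true if HDFS reports the file closed; false if the probe could not tell. */
  static boolean isFileClosed(FileSystem fs, Path wal) {
    try {
      // DistributedFileSystem#isFileClosed(Path) is resolved reflectively so the
      // caller also loads against filesystem classes that do not expose it.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return false; // method absent: caller must fall back to plain lease-recovery retries
    } catch (IllegalAccessException | InvocationTargetException e) {
      // The log's case: e.getCause() is IOException("Filesystem closed") thrown by
      // DFSClient.checkOpen() because the client was closed during shutdown.
      return false;
    }
  }
}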
11 more 2024-11-27T03:36:29,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38227 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-27T03:36:29,924 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-27T03:36:29,924 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-27T03:36:29,926 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-27T03:36:29,926 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:29,928 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., hostname=66d3634da959,34443,1732678578710, seqNum=2] 2024-11-27T03:36:29,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:29,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-27T03:36:29,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/3ca7a4e3d6724aa7963d5199c2493db8 is 1080, key is row0001/info:/1732678589929/Put/seqid=0 2024-11-27T03:36:29,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741837_1013 (size=12509) 2024-11-27T03:36:29,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741837_1013 (size=12509) 2024-11-27T03:36:29,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/3ca7a4e3d6724aa7963d5199c2493db8 2024-11-27T03:36:29,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/3ca7a4e3d6724aa7963d5199c2493db8 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/3ca7a4e3d6724aa7963d5199c2493db8 2024-11-27T03:36:29,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/3ca7a4e3d6724aa7963d5199c2493db8, entries=7, sequenceid=11, filesize=12.2 K 
2024-11-27T03:36:29,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for df698933c24cf07640381536ae02d6b5 in 44ms, sequenceid=11, compaction requested=false 2024-11-27T03:36:29,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:29,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-27T03:36:29,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/c2d2d9fbd0124bba81975c577fe409d0 is 1080, key is row0008/info:/1732678589944/Put/seqid=0 2024-11-27T03:36:29,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741838_1014 (size=25453) 2024-11-27T03:36:29,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741838_1014 (size=25453) 2024-11-27T03:36:29,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/c2d2d9fbd0124bba81975c577fe409d0 2024-11-27T03:36:30,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/c2d2d9fbd0124bba81975c577fe409d0 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0 2024-11-27T03:36:30,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0, entries=19, sequenceid=33, filesize=24.9 K 2024-11-27T03:36:30,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for df698933c24cf07640381536ae02d6b5 in 20ms, sequenceid=33, compaction requested=false 2024-11-27T03:36:30,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:30,008 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:30,008 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:30,008 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0 because midkey is the same as first or last row 2024-11-27T03:36:30,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:30,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:30,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:30,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:30,736 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-27T03:36:30,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,770 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:30,773 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-27T03:36:31,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:31,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:31,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:31,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:32,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:32,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-27T03:36:32,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/c7d74a9834fb4526945cb4ce933e8e2d is 1080, key is row0027/info:/1732678589988/Put/seqid=0 2024-11-27T03:36:32,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741839_1015 (size=12509) 2024-11-27T03:36:32,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741839_1015 (size=12509) 2024-11-27T03:36:32,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/c7d74a9834fb4526945cb4ce933e8e2d 2024-11-27T03:36:32,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/c7d74a9834fb4526945cb4ce933e8e2d as 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c7d74a9834fb4526945cb4ce933e8e2d 2024-11-27T03:36:32,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c7d74a9834fb4526945cb4ce933e8e2d, entries=7, sequenceid=43, filesize=12.2 K 2024-11-27T03:36:32,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for df698933c24cf07640381536ae02d6b5 in 28ms, sequenceid=43, compaction requested=true 2024-11-27T03:36:32,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:32,032 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,032 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,032 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0 because midkey is the same as first or last row 2024-11-27T03:36:32,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store df698933c24cf07640381536ae02d6b5:info, priority=-2147483648, current under compaction store size is 1 2024-11-27T03:36:32,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:32,032 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T03:36:32,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:32,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-27T03:36:32,033 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T03:36:32,034 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): df698933c24cf07640381536ae02d6b5/info is initiating minor compaction (all files) 2024-11-27T03:36:32,034 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of df698933c24cf07640381536ae02d6b5/info in TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 
2024-11-27T03:36:32,034 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/3ca7a4e3d6724aa7963d5199c2493db8, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c7d74a9834fb4526945cb4ce933e8e2d] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp, totalSize=49.3 K 2024-11-27T03:36:32,034 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3ca7a4e3d6724aa7963d5199c2493db8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732678589929 2024-11-27T03:36:32,035 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting c2d2d9fbd0124bba81975c577fe409d0, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1732678589944 2024-11-27T03:36:32,035 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting c7d74a9834fb4526945cb4ce933e8e2d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732678589988 2024-11-27T03:36:32,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/20abad098a1847b4bf15f364e01a57ee is 1080, key is row0034/info:/1732678592006/Put/seqid=0 2024-11-27T03:36:32,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741840_1016 (size=16817) 2024-11-27T03:36:32,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741840_1016 (size=16817) 2024-11-27T03:36:32,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/20abad098a1847b4bf15f364e01a57ee 2024-11-27T03:36:32,051 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): df698933c24cf07640381536ae02d6b5#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T03:36:32,051 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 is 1080, key is row0001/info:/1732678589929/Put/seqid=0 2024-11-27T03:36:32,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/20abad098a1847b4bf15f364e01a57ee as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/20abad098a1847b4bf15f364e01a57ee 2024-11-27T03:36:32,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741841_1017 (size=40670) 2024-11-27T03:36:32,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741841_1017 (size=40670) 2024-11-27T03:36:32,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/20abad098a1847b4bf15f364e01a57ee, entries=11, sequenceid=57, filesize=16.4 K 2024-11-27T03:36:32,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for df698933c24cf07640381536ae02d6b5 in 27ms, sequenceid=57, compaction requested=false 2024-11-27T03:36:32,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:32,060 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,060 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,060 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0 because midkey is the same as first or last row 2024-11-27T03:36:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:32,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-27T03:36:32,067 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 as 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 2024-11-27T03:36:32,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/da27548bcb244c67881f3fe6cf512cc2 is 1080, key is row0045/info:/1732678592034/Put/seqid=0 2024-11-27T03:36:32,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741842_1018 (size=18987) 2024-11-27T03:36:32,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741842_1018 (size=18987) 2024-11-27T03:36:32,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/da27548bcb244c67881f3fe6cf512cc2 2024-11-27T03:36:32,074 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in df698933c24cf07640381536ae02d6b5/info of df698933c24cf07640381536ae02d6b5 into 7dcf9ba0f7194c369cbd2d2902ae8ac2(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T03:36:32,074 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:32,075 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., storeName=df698933c24cf07640381536ae02d6b5/info, priority=13, startTime=1732678592032; duration=0sec 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 because midkey is the same as first or last row 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 because midkey is the same as first or last row 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 because midkey is the same as first or last row 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:32,075 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: df698933c24cf07640381536ae02d6b5:info 2024-11-27T03:36:32,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/da27548bcb244c67881f3fe6cf512cc2 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/da27548bcb244c67881f3fe6cf512cc2 2024-11-27T03:36:32,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/da27548bcb244c67881f3fe6cf512cc2, entries=13, sequenceid=73, filesize=18.5 K 2024-11-27T03:36:32,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=7.36 KB/7532 for df698933c24cf07640381536ae02d6b5 in 24ms, sequenceid=73, compaction requested=true 2024-11-27T03:36:32,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:32,086 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,086 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,087 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 because midkey is the same as first or last row 2024-11-27T03:36:32,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store df698933c24cf07640381536ae02d6b5:info, priority=-2147483648, current under compaction store size is 1 2024-11-27T03:36:32,087 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:32,087 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T03:36:32,088 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76474 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T03:36:32,088 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): df698933c24cf07640381536ae02d6b5/info is initiating minor compaction (all files) 2024-11-27T03:36:32,088 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of df698933c24cf07640381536ae02d6b5/info in TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:32,088 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/20abad098a1847b4bf15f364e01a57ee, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/da27548bcb244c67881f3fe6cf512cc2] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp, totalSize=74.7 K 2024-11-27T03:36:32,088 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7dcf9ba0f7194c369cbd2d2902ae8ac2, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732678589929 2024-11-27T03:36:32,089 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20abad098a1847b4bf15f364e01a57ee, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732678592006 2024-11-27T03:36:32,089 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting da27548bcb244c67881f3fe6cf512cc2, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732678592034 2024-11-27T03:36:32,099 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): df698933c24cf07640381536ae02d6b5#info#compaction#61 average throughput is 29.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T03:36:32,099 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/79199b74fb684692adfb5a61e82f0672 is 1080, key is row0001/info:/1732678589929/Put/seqid=0 2024-11-27T03:36:32,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741843_1019 (size=66689) 2024-11-27T03:36:32,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741843_1019 (size=66689) 2024-11-27T03:36:32,110 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/79199b74fb684692adfb5a61e82f0672 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 2024-11-27T03:36:32,117 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in df698933c24cf07640381536ae02d6b5/info of df698933c24cf07640381536ae02d6b5 into 79199b74fb684692adfb5a61e82f0672(size=65.1 K), total size for store is 65.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:32,117 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., storeName=df698933c24cf07640381536ae02d6b5/info, priority=13, startTime=1732678592087; duration=0sec 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 because midkey is the same as first or last row 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 because midkey is the same as first or last row 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 because midkey is the same as first or last row 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:32,117 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: df698933c24cf07640381536ae02d6b5:info 2024-11-27T03:36:32,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:32,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:32,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:32,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:33,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:33,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:33,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:33,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:34,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-27T03:36:34,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/aaf8de86cd7c4484a3b2b2f0a72c4086 is 1080, key is row0058/info:/1732678592064/Put/seqid=0 2024-11-27T03:36:34,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741844_1020 (size=13586) 2024-11-27T03:36:34,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741844_1020 (size=13586) 2024-11-27T03:36:34,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/aaf8de86cd7c4484a3b2b2f0a72c4086 2024-11-27T03:36:34,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/aaf8de86cd7c4484a3b2b2f0a72c4086 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/aaf8de86cd7c4484a3b2b2f0a72c4086 2024-11-27T03:36:34,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/aaf8de86cd7c4484a3b2b2f0a72c4086, entries=8, sequenceid=86, filesize=13.3 K 2024-11-27T03:36:34,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for df698933c24cf07640381536ae02d6b5 in 28ms, sequenceid=86, compaction requested=false 2024-11-27T03:36:34,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:34,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,113 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-27T03:36:34,113 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:34,113 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 because midkey is the same as first or last row 2024-11-27T03:36:34,113 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-27T03:36:34,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/9b32648b3f1d4bac930731a94893557a is 1080, key is row0066/info:/1732678594088/Put/seqid=0 2024-11-27T03:36:34,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741845_1021 (size=16817) 2024-11-27T03:36:34,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741845_1021 (size=16817) 2024-11-27T03:36:34,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/9b32648b3f1d4bac930731a94893557a 2024-11-27T03:36:34,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/9b32648b3f1d4bac930731a94893557a as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/9b32648b3f1d4bac930731a94893557a 2024-11-27T03:36:34,135 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/9b32648b3f1d4bac930731a94893557a, entries=11, sequenceid=100, filesize=16.4 K 2024-11-27T03:36:34,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for df698933c24cf07640381536ae02d6b5 in 23ms, sequenceid=100, compaction requested=true 2024-11-27T03:36:34,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:34,136 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-11-27T03:36:34,136 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:34,136 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 because midkey is the same as first or last row 2024-11-27T03:36:34,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store df698933c24cf07640381536ae02d6b5:info, priority=-2147483648, current under compaction store size is 1 2024-11-27T03:36:34,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,136 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T03:36:34,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-27T03:36:34,138 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-27T03:36:34,138 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): df698933c24cf07640381536ae02d6b5/info is initiating minor compaction (all files) 2024-11-27T03:36:34,138 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of df698933c24cf07640381536ae02d6b5/info in TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:34,138 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/aaf8de86cd7c4484a3b2b2f0a72c4086, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/9b32648b3f1d4bac930731a94893557a] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp, totalSize=94.8 K 2024-11-27T03:36:34,138 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 79199b74fb684692adfb5a61e82f0672, keycount=57, bloomtype=ROW, size=65.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732678589929 2024-11-27T03:36:34,139 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting aaf8de86cd7c4484a3b2b2f0a72c4086, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732678592064 2024-11-27T03:36:34,139 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b32648b3f1d4bac930731a94893557a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732678594088 2024-11-27T03:36:34,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/d41cc11c412c48939289631143116878 is 1080, key is row0077/info:/1732678594114/Put/seqid=0 2024-11-27T03:36:34,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741846_1022 
(size=17894) 2024-11-27T03:36:34,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741846_1022 (size=17894) 2024-11-27T03:36:34,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/d41cc11c412c48939289631143116878 2024-11-27T03:36:34,155 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): df698933c24cf07640381536ae02d6b5#info#compaction#65 average throughput is 26.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T03:36:34,156 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/2c4d9126f708411d9a7ad5ef88ca4823 is 1080, key is row0001/info:/1732678589929/Put/seqid=0 2024-11-27T03:36:34,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/d41cc11c412c48939289631143116878 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/d41cc11c412c48939289631143116878 2024-11-27T03:36:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741847_1023 (size=87327) 2024-11-27T03:36:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741847_1023 (size=87327) 2024-11-27T03:36:34,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/d41cc11c412c48939289631143116878, entries=12, sequenceid=115, filesize=17.5 K 2024-11-27T03:36:34,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for df698933c24cf07640381536ae02d6b5 in 27ms, sequenceid=115, compaction requested=false 2024-11-27T03:36:34,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:34,163 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.3 K, sizeToCheck=16.0 K 2024-11-27T03:36:34,163 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:34,163 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 because midkey is the same as first or last row 
2024-11-27T03:36:34,164 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/2c4d9126f708411d9a7ad5ef88ca4823 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823 2024-11-27T03:36:34,170 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in df698933c24cf07640381536ae02d6b5/info of df698933c24cf07640381536ae02d6b5 into 2c4d9126f708411d9a7ad5ef88ca4823(size=85.3 K), total size for store is 102.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for df698933c24cf07640381536ae02d6b5: 2024-11-27T03:36:34,170 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., storeName=df698933c24cf07640381536ae02d6b5/info, priority=13, startTime=1732678594136; duration=0sec 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-11-27T03:36:34,170 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-27T03:36:34,171 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,171 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,171 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: df698933c24cf07640381536ae02d6b5:info 2024-11-27T03:36:34,172 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38227 {}] assignment.AssignmentManager(1363): Split request from 66d3634da959,34443,1732678578710, parent={ENCODED => df698933c24cf07640381536ae02d6b5, NAME => 'TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.', STARTKEY => '', ENDKEY => ''}, 
splitKey=row0062 2024-11-27T03:36:34,178 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38227 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=66d3634da959,34443,1732678578710 2024-11-27T03:36:34,183 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38227 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df698933c24cf07640381536ae02d6b5, daughterA=bff5b7c2fe3e28b8661aa63586114985, daughterB=c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,184 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df698933c24cf07640381536ae02d6b5, daughterA=bff5b7c2fe3e28b8661aa63586114985, daughterB=c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,184 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df698933c24cf07640381536ae02d6b5, daughterA=bff5b7c2fe3e28b8661aa63586114985, daughterB=c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,185 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df698933c24cf07640381536ae02d6b5, daughterA=bff5b7c2fe3e28b8661aa63586114985, daughterB=c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, UNASSIGN}] 2024-11-27T03:36:34,194 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, UNASSIGN 2024-11-27T03:36:34,195 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=df698933c24cf07640381536ae02d6b5, regionState=CLOSING, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:34,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, UNASSIGN because future has completed 2024-11-27T03:36:34,198 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-27T03:36:34,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure df698933c24cf07640381536ae02d6b5, server=66d3634da959,34443,1732678578710}] 2024-11-27T03:36:34,356 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,357 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 
{event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-27T03:36:34,358 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing df698933c24cf07640381536ae02d6b5, disabling compactions & flushes 2024-11-27T03:36:34,358 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:34,358 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:34,358 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. after waiting 0 ms 2024-11-27T03:36:34,358 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:34,358 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing df698933c24cf07640381536ae02d6b5 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-27T03:36:34,366 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/51eefa78f2994b429e98aa86ab1f5334 is 1080, key is row0089/info:/1732678594138/Put/seqid=0 2024-11-27T03:36:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741848_1024 (size=13586) 2024-11-27T03:36:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741848_1024 (size=13586) 2024-11-27T03:36:34,371 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/51eefa78f2994b429e98aa86ab1f5334 2024-11-27T03:36:34,376 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/.tmp/info/51eefa78f2994b429e98aa86ab1f5334 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/51eefa78f2994b429e98aa86ab1f5334 2024-11-27T03:36:34,382 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/51eefa78f2994b429e98aa86ab1f5334, entries=8, sequenceid=127, filesize=13.3 K 2024-11-27T03:36:34,384 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for df698933c24cf07640381536ae02d6b5 in 26ms, sequenceid=127, compaction requested=true 2024-11-27T03:36:34,385 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/3ca7a4e3d6724aa7963d5199c2493db8, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c7d74a9834fb4526945cb4ce933e8e2d, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/20abad098a1847b4bf15f364e01a57ee, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/da27548bcb244c67881f3fe6cf512cc2, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/aaf8de86cd7c4484a3b2b2f0a72c4086, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/9b32648b3f1d4bac930731a94893557a] to archive 2024-11-27T03:36:34,386 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-27T03:36:34,388 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/3ca7a4e3d6724aa7963d5199c2493db8 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/3ca7a4e3d6724aa7963d5199c2493db8 2024-11-27T03:36:34,390 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c2d2d9fbd0124bba81975c577fe409d0 2024-11-27T03:36:34,392 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/7dcf9ba0f7194c369cbd2d2902ae8ac2 2024-11-27T03:36:34,393 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c7d74a9834fb4526945cb4ce933e8e2d to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/c7d74a9834fb4526945cb4ce933e8e2d 2024-11-27T03:36:34,395 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/20abad098a1847b4bf15f364e01a57ee to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/20abad098a1847b4bf15f364e01a57ee 2024-11-27T03:36:34,396 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 to 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/79199b74fb684692adfb5a61e82f0672 2024-11-27T03:36:34,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/da27548bcb244c67881f3fe6cf512cc2 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/da27548bcb244c67881f3fe6cf512cc2 2024-11-27T03:36:34,399 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/aaf8de86cd7c4484a3b2b2f0a72c4086 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/aaf8de86cd7c4484a3b2b2f0a72c4086 2024-11-27T03:36:34,401 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/9b32648b3f1d4bac930731a94893557a to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/9b32648b3f1d4bac930731a94893557a 2024-11-27T03:36:34,408 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-11-27T03:36:34,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:34,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:34,409 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. 2024-11-27T03:36:34,409 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for df698933c24cf07640381536ae02d6b5: Waiting for close lock at 1732678594358Running coprocessor pre-close hooks at 1732678594358Disabling compacts and flushes for region at 1732678594358Disabling writes for close at 1732678594358Obtaining lock to block concurrent updates at 1732678594358Preparing flush snapshotting stores in df698933c24cf07640381536ae02d6b5 at 1732678594358Finished memstore snapshotting TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1732678594359 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. at 1732678594360 (+1 ms)Flushing df698933c24cf07640381536ae02d6b5/info: creating writer at 1732678594360Flushing df698933c24cf07640381536ae02d6b5/info: appending metadata at 1732678594365 (+5 ms)Flushing df698933c24cf07640381536ae02d6b5/info: closing flushed file at 1732678594365Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35925cb1: reopening flushed file at 1732678594375 (+10 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for df698933c24cf07640381536ae02d6b5 in 26ms, sequenceid=127, compaction requested=true at 1732678594384 (+9 ms)Writing region close event to WAL at 1732678594403 (+19 ms)Running coprocessor post-close hooks at 1732678594409 (+6 ms)Closed at 1732678594409 2024-11-27T03:36:34,413 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,414 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=df698933c24cf07640381536ae02d6b5, regionState=CLOSED 2024-11-27T03:36:34,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure df698933c24cf07640381536ae02d6b5, server=66d3634da959,34443,1732678578710 because future has completed 2024-11-27T03:36:34,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-27T03:36:34,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure df698933c24cf07640381536ae02d6b5, server=66d3634da959,34443,1732678578710 in 220 msec 2024-11-27T03:36:34,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-27T03:36:34,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=df698933c24cf07640381536ae02d6b5, UNASSIGN in 230 msec 2024-11-27T03:36:34,434 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:34,438 INFO 
[PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=df698933c24cf07640381536ae02d6b5, threads=3 2024-11-27T03:36:34,440 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/d41cc11c412c48939289631143116878 for region: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,440 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/51eefa78f2994b429e98aa86ab1f5334 for region: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,440 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823 for region: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,449 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/d41cc11c412c48939289631143116878, top=true 2024-11-27T03:36:34,450 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/51eefa78f2994b429e98aa86ab1f5334, top=true 2024-11-27T03:36:34,455 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334 for child: c3e92841af8e3280a2906309e831412a, parent: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,455 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878 for child: c3e92841af8e3280a2906309e831412a, parent: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,456 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/51eefa78f2994b429e98aa86ab1f5334 for region: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,456 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/d41cc11c412c48939289631143116878 for region: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741849_1025 (size=27) 2024-11-27T03:36:34,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741849_1025 (size=27) 2024-11-27T03:36:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741850_1026 (size=27) 2024-11-27T03:36:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741850_1026 (size=27) 2024-11-27T03:36:34,466 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823 for region: df698933c24cf07640381536ae02d6b5 2024-11-27T03:36:34,468 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region df698933c24cf07640381536ae02d6b5 Daughter A: [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5] storefiles, Daughter B: [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878] storefiles. 
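
Note: the split records above show the two on-disk naming schemes visible when a parent region's storefiles are divided between daughters: a reference file named "<hfile>.<parentEncodedRegion>" when a daughter gets half of a file (the "-top"/"-bottom" halves), and an HFileLink named "<table>=<parentEncodedRegion>-<hfile>" when a whole file falls entirely on one side ("top=true"). The sketch below only reconstructs the naming convention as it appears in these log lines; the class and method names are hypothetical stand-ins, not HBase's actual API (which lives in HRegionFileSystem / HFileLink).

public final class SplitFileNames {

    // Reference-file name, e.g. "2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5"
    // under each daughter's info/ directory.
    static String referenceName(String hfile, String parentEncodedName) {
        return hfile + "." + parentEncodedName;
    }

    // HFileLink name, e.g. "TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334",
    // used when the whole file belongs to one daughter ("top=true" in the log).
    static String hfileLinkName(String table, String parentEncodedName, String hfile) {
        return table + "=" + parentEncodedName + "-" + hfile;
    }

    public static void main(String[] args) {
        String parent = "df698933c24cf07640381536ae02d6b5";
        System.out.println(referenceName("2c4d9126f708411d9a7ad5ef88ca4823", parent));
        System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
                "51eefa78f2994b429e98aa86ab1f5334"));
    }
}
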
2024-11-27T03:36:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741851_1027 (size=71) 2024-11-27T03:36:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741851_1027 (size=71) 2024-11-27T03:36:34,479 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:34,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741852_1028 (size=71) 2024-11-27T03:36:34,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741852_1028 (size=71) 2024-11-27T03:36:34,492 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:34,501 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-27T03:36:34,503 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-27T03:36:34,505 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732678594505"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732678594505"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732678594505"}]},"ts":"1732678594505"} 2024-11-27T03:36:34,505 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732678594505"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678594505"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732678594505"}]},"ts":"1732678594505"} 2024-11-27T03:36:34,506 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732678594505"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732678594505"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732678594505"}]},"ts":"1732678594505"} 2024-11-27T03:36:34,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bff5b7c2fe3e28b8661aa63586114985, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=c3e92841af8e3280a2906309e831412a, ASSIGN}] 2024-11-27T03:36:34,523 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bff5b7c2fe3e28b8661aa63586114985, ASSIGN 2024-11-27T03:36:34,523 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3e92841af8e3280a2906309e831412a, ASSIGN 2024-11-27T03:36:34,524 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bff5b7c2fe3e28b8661aa63586114985, ASSIGN; state=SPLITTING_NEW, location=66d3634da959,34443,1732678578710; forceNewPlan=false, retain=false 2024-11-27T03:36:34,524 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3e92841af8e3280a2906309e831412a, ASSIGN; state=SPLITTING_NEW, location=66d3634da959,34443,1732678578710; forceNewPlan=false, retain=false 2024-11-27T03:36:34,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:34,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-27T03:36:34,674 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=bff5b7c2fe3e28b8661aa63586114985, regionState=OPENING, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:34,674 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c3e92841af8e3280a2906309e831412a, regionState=OPENING, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:34,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3e92841af8e3280a2906309e831412a, ASSIGN because future has completed 2024-11-27T03:36:34,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c3e92841af8e3280a2906309e831412a, server=66d3634da959,34443,1732678578710}] 2024-11-27T03:36:34,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bff5b7c2fe3e28b8661aa63586114985, ASSIGN because future has completed 2024-11-27T03:36:34,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure bff5b7c2fe3e28b8661aa63586114985, server=66d3634da959,34443,1732678578710}] 2024-11-27T03:36:34,838 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a. 
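
Note: the recurring "Failed invocation ... InvocationTargetException: null" WARNs above come from RecoverLeaseFSUtils calling isFileClosed reflectively (the stack trace shows Method.invoke in the call chain). When the already-shut-down DFSClient throws IOException("Filesystem closed"), reflection wraps it in an InvocationTargetException whose own message is null, which is exactly the "null ... Caused by: java.io.IOException: Filesystem closed" shape logged. A minimal, self-contained reproduction of that wrapping; ClosedFs is a hypothetical stand-in for the closed filesystem, not HDFS code:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public final class ReflectiveIsFileClosed {

    // Hypothetical stand-in for a DistributedFileSystem whose client is closed.
    public static final class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed"); // mirrors DFSClient.checkOpen
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedFs(), "hdfs://localhost/.../some.wal");
        } catch (InvocationTargetException e) {
            // Prints "null" for the wrapper and "Filesystem closed" for the cause,
            // matching the WARN + "Caused by" pattern in the log above.
            System.out.println("wrapper message: " + e.getMessage());
            System.out.println("cause: " + e.getCause().getMessage());
        }
    }
}
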
2024-11-27T03:36:34,838 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => c3e92841af8e3280a2906309e831412a, NAME => 'TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-27T03:36:34,839 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,839 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:36:34,839 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,839 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,842 INFO [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,844 INFO [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c3e92841af8e3280a2906309e831412a columnFamilyName info 2024-11-27T03:36:34,844 DEBUG [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:34,853 DEBUG [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5->hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823-top 2024-11-27T03:36:34,857 DEBUG [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334 2024-11-27T03:36:34,861 DEBUG [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878 2024-11-27T03:36:34,861 INFO [StoreOpener-c3e92841af8e3280a2906309e831412a-1 {}] regionserver.HStore(327): Store=c3e92841af8e3280a2906309e831412a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T03:36:34,861 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,862 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,863 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,863 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,863 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,865 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,866 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened c3e92841af8e3280a2906309e831412a; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=729174, jitterRate=-0.07280802726745605}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-27T03:36:34,866 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:34,867 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for c3e92841af8e3280a2906309e831412a: Running coprocessor pre-open hook at 1732678594840Writing region info on filesystem at 1732678594840Initializing all the Stores at 1732678594842 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678594842Cleaning up temporary data from old regions at 1732678594863 (+21 ms)Running coprocessor post-open hooks at 1732678594866 (+3 ms)Region opened successfully at 1732678594867 (+1 ms) 2024-11-27T03:36:34,868 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., pid=12, masterSystemTime=1732678594830 2024-11-27T03:36:34,868 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1 2024-11-27T03:36:34,868 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,868 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-27T03:36:34,869 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a. 2024-11-27T03:36:34,869 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files) 2024-11-27T03:36:34,869 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a. 
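
Note: two priorities appear in the records above. The open-time compact mark is queued at priority=-2147483648 (Integer.MIN_VALUE), and HStore(1527) then logs an override to -2147482648 for the freshly split daughter region. The second number is exactly MIN_VALUE + 1000; that offset of 1000 is inferred purely from the two logged values here, not taken from HBase source, so treat it as an observation:

public final class CompactionPriorityCheck {
    public static void main(String[] args) {
        int compactMark = Integer.MIN_VALUE;        // priority=-2147483648 in the compact-mark record
        int daughterOverride = compactMark + 1000;  // -2147482648, as logged by HStore(1527)
        System.out.println(compactMark + " -> " + daughterOverride);
    }
}
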
2024-11-27T03:36:34,869 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5->hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823-top, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=116.0 K 2024-11-27T03:36:34,870 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732678589929 2024-11-27T03:36:34,870 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a. 2024-11-27T03:36:34,870 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a. 2024-11-27T03:36:34,870 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985. 
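
Note: the three compaction inputs just listed are shown as 85.3 K, 17.5 K and 13.3 K against totalSize=116.0 K. The per-file figures are rounded to one decimal place, so the exact byte total can legitimately round to 116.0 K even though the displayed numbers sum to 116.1 K; a quick check of the displayed values:

public final class CompactionSizeCheck {
    public static void main(String[] args) {
        // Sum of the rounded per-file sizes; the exact byte sum rounds to 116.0 K.
        System.out.printf("%.1f K%n", 85.3 + 17.5 + 13.3); // prints 116.1 K
    }
}
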
2024-11-27T03:36:34,870 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => bff5b7c2fe3e28b8661aa63586114985, NAME => 'TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-27T03:36:34,870 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732678594114 2024-11-27T03:36:34,871 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,871 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:36:34,871 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,871 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,871 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732678594138 2024-11-27T03:36:34,871 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c3e92841af8e3280a2906309e831412a, regionState=OPEN, openSeqNum=131, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:34,872 INFO [StoreOpener-bff5b7c2fe3e28b8661aa63586114985-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,873 INFO [StoreOpener-bff5b7c2fe3e28b8661aa63586114985-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bff5b7c2fe3e28b8661aa63586114985 columnFamilyName info 2024-11-27T03:36:34,873 DEBUG [StoreOpener-bff5b7c2fe3e28b8661aa63586114985-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:36:34,873 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-27T03:36:34,873 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-27T03:36:34,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-27T03:36:34,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c3e92841af8e3280a2906309e831412a, server=66d3634da959,34443,1732678578710 because future has completed 2024-11-27T03:36:34,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-27T03:36:34,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure c3e92841af8e3280a2906309e831412a, server=66d3634da959,34443,1732678578710 in 197 msec 2024-11-27T03:36:34,879 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c3e92841af8e3280a2906309e831412a, ASSIGN in 356 msec 2024-11-27T03:36:34,883 DEBUG [StoreOpener-bff5b7c2fe3e28b8661aa63586114985-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5->hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823-bottom 2024-11-27T03:36:34,883 INFO [StoreOpener-bff5b7c2fe3e28b8661aa63586114985-1 {}] regionserver.HStore(327): Store=bff5b7c2fe3e28b8661aa63586114985/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-27T03:36:34,884 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,884 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,886 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,886 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,886 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, 
pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,888 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,889 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened bff5b7c2fe3e28b8661aa63586114985; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817307, jitterRate=0.03926083445549011}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-27T03:36:34,889 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bff5b7c2fe3e28b8661aa63586114985 2024-11-27T03:36:34,889 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for bff5b7c2fe3e28b8661aa63586114985: Running coprocessor pre-open hook at 1732678594871Writing region info on filesystem at 1732678594871Initializing all the Stores at 1732678594872 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678594872Cleaning up temporary data from old regions at 1732678594886 (+14 ms)Running coprocessor post-open hooks at 1732678594889 (+3 ms)Region opened successfully at 1732678594889 2024-11-27T03:36:34,890 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985., pid=13, masterSystemTime=1732678594830 2024-11-27T03:36:34,890 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store bff5b7c2fe3e28b8661aa63586114985:info, priority=-2147483648, current under compaction store size is 2 2024-11-27T03:36:34,890 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,890 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-27T03:36:34,891 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985. 
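
Note: the two daughters open with different desiredMaxFileSize values (729174 for c3e92841... earlier, 817307 for bff5... here) because ConstantSizeRegionSplitPolicy applies a per-region jitterRate to the configured maximum. Both logged values can be reproduced exactly from a base of 786432 bytes using size = base + (long)(base * jitterRate); both the base and that exact truncation formula are inferred from the numbers in this log rather than read out of HBase source:

public final class JitteredSplitSize {
    static long jittered(long base, double jitterRate) {
        return base + (long) (base * jitterRate); // truncation reproduces the logged values
    }

    public static void main(String[] args) {
        long base = 786432L; // inferred; no config value is shown in this log
        System.out.println(jittered(base, -0.07280802726745605)); // 729174 (c3e92841...)
        System.out.println(jittered(base, 0.03926083445549011));  // 817307 (bff5b7c2...)
    }
}
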
2024-11-27T03:36:34,891 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1541): bff5b7c2fe3e28b8661aa63586114985/info is initiating minor compaction (all files) 2024-11-27T03:36:34,891 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of bff5b7c2fe3e28b8661aa63586114985/info in TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985. 2024-11-27T03:36:34,891 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5->hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823-bottom] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/.tmp, totalSize=85.3 K 2024-11-27T03:36:34,892 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting 2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732678589929 2024-11-27T03:36:34,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/info/4e27caa49e2542e5914f50621a18ca61 is 193, key is TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a./info:regioninfo/1732678594871/Put/seqid=0 2024-11-27T03:36:34,892 DEBUG [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985. 2024-11-27T03:36:34,892 INFO [RS_OPEN_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985. 2024-11-27T03:36:34,893 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=bff5b7c2fe3e28b8661aa63586114985, regionState=OPEN, openSeqNum=131, regionLocation=66d3634da959,34443,1732678578710 2024-11-27T03:36:34,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure bff5b7c2fe3e28b8661aa63586114985, server=66d3634da959,34443,1732678578710 because future has completed 2024-11-27T03:36:34,897 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#68 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T03:36:34,898 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/a4364375f84747989044a933d7f8a168 is 1080, key is row0062/info:/1732678592073/Put/seqid=0 2024-11-27T03:36:34,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741853_1029 (size=9882) 2024-11-27T03:36:34,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741853_1029 (size=9882) 2024-11-27T03:36:34,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/info/4e27caa49e2542e5914f50621a18ca61 2024-11-27T03:36:34,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-27T03:36:34,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure bff5b7c2fe3e28b8661aa63586114985, server=66d3634da959,34443,1732678578710 in 220 msec 2024-11-27T03:36:34,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-27T03:36:34,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=bff5b7c2fe3e28b8661aa63586114985, ASSIGN in 382 msec 2024-11-27T03:36:34,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741854_1030 (size=42984) 2024-11-27T03:36:34,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741854_1030 (size=42984) 2024-11-27T03:36:34,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=df698933c24cf07640381536ae02d6b5, daughterA=bff5b7c2fe3e28b8661aa63586114985, daughterB=c3e92841af8e3280a2906309e831412a in 727 msec 2024-11-27T03:36:34,911 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/a4364375f84747989044a933d7f8a168 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/a4364375f84747989044a933d7f8a168 2024-11-27T03:36:34,917 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into a4364375f84747989044a933d7f8a168(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
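
Note: the "Committing .tmp/... as .../info/..." lines above reflect the standard write-then-rename commit: compaction and flush outputs are written completely under the region's .tmp directory and only moved into the live store directory once finished, so concurrent readers never observe a partial HFile. A minimal sketch of the pattern, using java.nio.file in place of the HDFS FileSystem API:

import java.nio.file.*;

public final class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Path region = Files.createTempDirectory("region");           // stands in for the region dir
        Path store  = Files.createDirectory(region.resolve("info")); // live store directory
        Path tmpDir = Files.createDirectory(region.resolve(".tmp"));
        Path tmp = tmpDir.resolve("a4364375f84747989044a933d7f8a168");
        Files.writeString(tmp, "hfile bytes...");                    // 1. write the file completely
        Path committed = store.resolve(tmp.getFileName());
        Files.move(tmp, committed, StandardCopyOption.ATOMIC_MOVE);  // 2. commit via atomic rename
        System.out.println("committed: " + committed);
    }
}
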
2024-11-27T03:36:34,917 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a: 2024-11-27T03:36:34,917 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678594868; duration=0sec 2024-11-27T03:36:34,918 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,918 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info 2024-11-27T03:36:34,920 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bff5b7c2fe3e28b8661aa63586114985#info#compaction#69 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-27T03:36:34,920 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/.tmp/info/78021ec04dd44e9e831c2dc9151c8f96 is 1080, key is row0001/info:/1732678589929/Put/seqid=0 2024-11-27T03:36:34,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/ns/ac759aa628f34bf4b5f32c4210407bd1 is 43, key is default/ns:d/1732678579778/Put/seqid=0 2024-11-27T03:36:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741855_1031 (size=70862) 2024-11-27T03:36:34,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741855_1031 (size=70862) 2024-11-27T03:36:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741856_1032 (size=5153) 2024-11-27T03:36:34,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741856_1032 (size=5153) 2024-11-27T03:36:34,929 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/ns/ac759aa628f34bf4b5f32c4210407bd1 2024-11-27T03:36:34,932 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/.tmp/info/78021ec04dd44e9e831c2dc9151c8f96 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/78021ec04dd44e9e831c2dc9151c8f96 2024-11-27T03:36:34,938 INFO 
[RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in bff5b7c2fe3e28b8661aa63586114985/info of bff5b7c2fe3e28b8661aa63586114985 into 78021ec04dd44e9e831c2dc9151c8f96(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-27T03:36:34,938 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for bff5b7c2fe3e28b8661aa63586114985: 2024-11-27T03:36:34,938 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985., storeName=bff5b7c2fe3e28b8661aa63586114985/info, priority=15, startTime=1732678594890; duration=0sec 2024-11-27T03:36:34,938 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-27T03:36:34,938 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bff5b7c2fe3e28b8661aa63586114985:info 2024-11-27T03:36:34,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/table/fda489065a734473b3cbd9769f948294 is 65, key is TestLogRolling-testLogRolling/table:state/1732678580230/Put/seqid=0 2024-11-27T03:36:34,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741857_1033 (size=5340) 2024-11-27T03:36:34,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741857_1033 (size=5340) 2024-11-27T03:36:34,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/table/fda489065a734473b3cbd9769f948294 2024-11-27T03:36:34,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/info/4e27caa49e2542e5914f50621a18ca61 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/info/4e27caa49e2542e5914f50621a18ca61 2024-11-27T03:36:34,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/info/4e27caa49e2542e5914f50621a18ca61, entries=30, sequenceid=17, filesize=9.7 K 2024-11-27T03:36:34,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/ns/ac759aa628f34bf4b5f32c4210407bd1 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/ns/ac759aa628f34bf4b5f32c4210407bd1 2024-11-27T03:36:34,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/ns/ac759aa628f34bf4b5f32c4210407bd1, entries=2, sequenceid=17, filesize=5.0 K 2024-11-27T03:36:34,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/table/fda489065a734473b3cbd9769f948294 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/table/fda489065a734473b3cbd9769f948294 2024-11-27T03:36:34,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/table/fda489065a734473b3cbd9769f948294, entries=2, sequenceid=17, filesize=5.2 K 2024-11-27T03:36:34,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 103ms, sequenceid=17, compaction requested=false 2024-11-27T03:36:34,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-27T03:36:35,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-27T03:36:35,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:35,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:35,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:36,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:34922 deadline: 1732678606158, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. is not online on 66d3634da959,34443,1732678578710
2024-11-27T03:36:36,184 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., hostname=66d3634da959,34443,1732678578710, seqNum=2, the old value is region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., hostname=66d3634da959,34443,1732678578710, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. is not online on 66d3634da959,34443,1732678578710
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-27T03:36:36,185 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., hostname=66d3634da959,34443,1732678578710, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5. is not online on 66d3634da959,34443,1732678578710
2024-11-27T03:36:36,185 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732678579858.df698933c24cf07640381536ae02d6b5., hostname=66d3634da959,34443,1732678578710, seqNum=2 from cache
2024-11-27T03:36:36,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
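The three AsyncRegionLocatorHelper lines above show the standard client reaction to NotServingRegionException: compare the location that just failed against the cached one, then evict it from the cache so the next attempt re-resolves the region (which has moved after the TestLogRolling split/restart). A hedged sketch of that eviction pattern follows; RegionLocationCache and Location are simplified stand-in types, not HBase classes.

// Illustrative sketch of conditional location-cache eviction.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class RegionLocationCache {

  record Location(String hostname, long seqNum) {}

  private final Map<String, Location> cache = new ConcurrentHashMap<>();

  Location locate(String regionName) {
    // A real client would consult hbase:meta here; this stub only caches.
    return cache.computeIfAbsent(regionName, r -> new Location("unknown", -1));
  }

  /** Mirrors "Try removing region=... from cache" in the log above. */
  void invalidate(String regionName, Location stale) {
    // Evict only if the cached entry is still the one that failed, so a
    // concurrent refresh carrying a newer seqNum is not thrown away.
    cache.remove(regionName, stale);
  }
}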
2024-11-27T03:36:36,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:36,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:36,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:37,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:37,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:37,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:37,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:38,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:38,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:38,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:38,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:39,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:39,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:39,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:39,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
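The once-per-second cadence of these warnings (03:36:35 through 03:36:39, one per WAL file) is the signature of a polling lease-recovery loop that keeps probing until the file reports closed or a deadline passes. The sketch below is a hypothetical reconstruction of that shape, not the real RecoverLeaseFSUtils code; waitUntilClosed and its timings are assumptions.

// Hedged sketch of a 1-second polling loop around the isFileClosed probe.
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class LeaseRecoveryLoop {

  /** Polls the supplied probe until it reports closed or the deadline passes. */
  static boolean waitUntilClosed(BooleanSupplier isClosedProbe, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (isClosedProbe.getAsBoolean()) {
        return true;
      }
      // One probe per second => one "Failed invocation" WARN per second
      // for as long as the probe keeps throwing.
      TimeUnit.SECONDS.sleep(1);
    }
    return false;
  }
}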
2024-11-27T03:36:39,915 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-27T03:36:39,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:39,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-27T03:36:40,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:40,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
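The repeated FsDatasetImpl warnings point to a shutdown race rather than a data problem: the HBase-Metrics2-1 thread is still sampling a datanode whose executors map has already been set to null by the shutdown path, hence the "Cannot invoke java.util.Map.values()" NullPointerException message. Below is a small sketch of that race and the defensive snapshot read that would avoid it; all names (VolumeMetrics, collectQueuedTaskCount) are illustrative assumptions, not the actual Hadoop code.

// Hedged sketch of a metrics-vs-shutdown race and a null-safe collector.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

public final class VolumeMetrics {

  // Set to null on shutdown; the metrics thread may still be running.
  private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

  void shutdown() {
    executors = null; // after this, an unguarded collector would NPE
  }

  int collectVolumeCount() {
    Map<String, ExecutorService> snapshot = executors; // read the field once
    if (snapshot == null) {
      return 0; // shutting down: report nothing instead of throwing
    }
    return snapshot.values().size();
  }
}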
2024-11-27T03:36:40,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:40,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:41,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:41,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:41,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:41,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:42,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:42,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:42,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:42,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:43,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:43,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:43,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:43,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:44,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:44,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:44,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:44,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:45,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:45,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:45,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:45,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:46,218 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., hostname=66d3634da959,34443,1732678578710, seqNum=131]
2024-11-27T03:36:46,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:46,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:36:46,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/75b820e5accd4a828a8bf94971fa1d27 is 1080, key is row0097/info:/1732678606219/Put/seqid=0
2024-11-27T03:36:46,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741858_1034 (size=12516)
2024-11-27T03:36:46,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741858_1034 (size=12516)
2024-11-27T03:36:46,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/75b820e5accd4a828a8bf94971fa1d27
2024-11-27T03:36:46,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/75b820e5accd4a828a8bf94971fa1d27 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/75b820e5accd4a828a8bf94971fa1d27
2024-11-27T03:36:46,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/75b820e5accd4a828a8bf94971fa1d27, entries=7, sequenceid=141, filesize=12.2 K
2024-11-27T03:36:46,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for c3e92841af8e3280a2906309e831412a in 80ms, sequenceid=141, compaction requested=false
2024-11-27T03:36:46,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
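The "Failed invocation ... InvocationTargetException" WARNs in this log come from HBase's RecoverLeaseFSUtils, which calls DistributedFileSystem.isFileClosed through reflection; when the reflective target throws, Method.invoke wraps the real error, so the "Filesystem closed" IOException raised by DFSClient.checkOpen surfaces only as the Caused by. A minimal sketch of such a reflective probe (class and method names here are illustrative; the actual HBase implementation differs in detail):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of a reflective isFileClosed probe in the style of RecoverLeaseFSUtils.
// The method is resolved at runtime so the caller also works against FileSystem
// implementations that do not offer isFileClosed at all.
public final class IsFileClosedProbe {

  private IsFileClosedProbe() {
  }

  // Returns true only when the filesystem positively reports the file closed.
  // Any IOException thrown by the underlying DistributedFileSystem (for example
  // "Filesystem closed" from DFSClient.checkOpen after the client is shut down)
  // arrives here wrapped in an InvocationTargetException, which is exactly the
  // shape of the WARN entries above.
  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation has no isFileClosed probe
    } catch (InvocationTargetException e) {
      // e.getCause() carries the real failure; a caller would log it and retry.
      return false;
    } catch (ReflectiveOperationException e) {
      return false; // IllegalAccessException and friends
    }
  }
}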
2024-11-27T03:36:46,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:46,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-11-27T03:36:46,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/0e4a04b311d04fd89205a8ab8c33eeb9 is 1080, key is row0104/info:/1732678606232/Put/seqid=0
2024-11-27T03:36:46,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741859_1035 (size=16828)
2024-11-27T03:36:46,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741859_1035 (size=16828)
2024-11-27T03:36:46,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/0e4a04b311d04fd89205a8ab8c33eeb9
2024-11-27T03:36:46,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/0e4a04b311d04fd89205a8ab8c33eeb9 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0e4a04b311d04fd89205a8ab8c33eeb9
2024-11-27T03:36:46,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0e4a04b311d04fd89205a8ab8c33eeb9, entries=11, sequenceid=155, filesize=16.4 K
2024-11-27T03:36:46,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for c3e92841af8e3280a2906309e831412a in 26ms, sequenceid=155, compaction requested=true
2024-11-27T03:36:46,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:46,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:46,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:46,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:46,337 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:46,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-27T03:36:46,338 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 72328 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:46,338 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:46,338 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:46,338 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/a4364375f84747989044a933d7f8a168, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/75b820e5accd4a828a8bf94971fa1d27, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0e4a04b311d04fd89205a8ab8c33eeb9] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=70.6 K
2024-11-27T03:36:46,339 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting a4364375f84747989044a933d7f8a168, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732678592073
2024-11-27T03:36:46,339 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting 75b820e5accd4a828a8bf94971fa1d27, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732678606219
2024-11-27T03:36:46,339 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting 0e4a04b311d04fd89205a8ab8c33eeb9, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732678606232
2024-11-27T03:36:46,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/58e50d70d116425ea1c57fb177794118 is 1080, key is row0115/info:/1732678606313/Put/seqid=0
2024-11-27T03:36:46,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741860_1036 (size=17906)
2024-11-27T03:36:46,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741860_1036 (size=17906)
2024-11-27T03:36:46,346 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/58e50d70d116425ea1c57fb177794118
2024-11-27T03:36:46,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/58e50d70d116425ea1c57fb177794118 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/58e50d70d116425ea1c57fb177794118
2024-11-27T03:36:46,353 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#75 average throughput is 54.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:46,354 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/f449010327654acfa05577a5f1b61e22 is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:46,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/58e50d70d116425ea1c57fb177794118, entries=12, sequenceid=170, filesize=17.5 K
2024-11-27T03:36:46,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for c3e92841af8e3280a2906309e831412a in 19ms, sequenceid=170, compaction requested=false
2024-11-27T03:36:46,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:46,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741861_1037 (size=62558)
2024-11-27T03:36:46,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741861_1037 (size=62558)
2024-11-27T03:36:46,367 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/f449010327654acfa05577a5f1b61e22 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/f449010327654acfa05577a5f1b61e22
2024-11-27T03:36:46,372 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into f449010327654acfa05577a5f1b61e22(size=61.1 K), total size for store is 78.6 K. This selection was in queue for 0sec, and took 0sec to execute.
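The compaction above is picked by a size-ratio policy: the exploring policy looks for a window of adjacent store files (here 42.0 K + 12.2 K + 16.4 K = 70.6 K, 72328 bytes) in which no file is disproportionately larger than the rest, then rewrites the window into one file (61.1 K after compaction). A simplified, hypothetical sketch of that ratio test follows; it is not HBase's actual ExploringCompactionPolicy, which additionally weighs permutations against store limits and the configured hbase.hstore.compaction.ratio:

import java.util.ArrayList;
import java.util.List;

// Simplified size-ratio selection in the spirit of an exploring compaction
// policy: scan every contiguous window of file sizes and keep the largest
// window in which each file is at most `ratio` times the sum of the others.
public final class RatioSelection {

  public static List<Long> select(List<Long> fileSizes, double ratio) {
    List<Long> best = new ArrayList<>();
    for (int start = 0; start < fileSizes.size(); start++) {
      for (int end = start + 1; end < fileSizes.size(); end++) {
        List<Long> window = fileSizes.subList(start, end + 1);
        if (fitsRatio(window, ratio) && window.size() > best.size()) {
          best = new ArrayList<>(window);
        }
      }
    }
    return best;
  }

  // Every file must be <= ratio * (sum of the other files in the window),
  // so one oversized file cannot drag many small ones into a rewrite.
  private static boolean fitsRatio(List<Long> window, double ratio) {
    long total = window.stream().mapToLong(Long::longValue).sum();
    for (long size : window) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // File sizes from the log above: 42984 + 12516 + 16828 = 72328 bytes.
    // With a demo ratio of 1.5 the whole window qualifies and all three
    // files are selected, matching the "minor compaction (all files)" entry.
    System.out.println(select(List.of(42984L, 12516L, 16828L), 1.5));
  }
}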
2024-11-27T03:36:46,372 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:46,372 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678606337; duration=0sec
2024-11-27T03:36:46,373 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:46,373 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:46,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:46,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:46,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:46,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:47,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:47,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:47,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:47,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
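Every WARN record in this run carries the same stack: RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed() through reflection while the test cluster is tearing down, and because the DFS client has already been shut down, the probe fails with java.io.IOException: Filesystem closed, which reflection surfaces wrapped in a java.lang.reflect.InvocationTargetException. A minimal sketch of that call pattern, assuming only the Hadoop client API visible in the trace (the class name is illustrative, not HBase's actual code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedSketch {
    // isFileClosed(Path) exists on DistributedFileSystem but not on the
    // generic FileSystem interface, hence the reflective lookup. Whatever
    // the method throws (here "Filesystem closed") comes back wrapped in
    // InvocationTargetException, exactly as the WARN traces above show.
    static boolean isFileClosed(FileSystem fs, Path path) throws Exception {
        Method m = fs.getClass().getMethod("isFileClosed", Path.class);
        try {
            return (Boolean) m.invoke(fs, path);
        } catch (InvocationTargetException e) {
            Throwable cause = e.getCause(); // e.g. java.io.IOException: Filesystem closed
            throw cause instanceof Exception ? (Exception) cause : e;
        }
    }
}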
2024-11-27T03:36:48,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:48,359 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:36:48,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/3effc307fed444a38a0c4eda258698be is 1080, key is row0127/info:/1732678606338/Put/seqid=0
2024-11-27T03:36:48,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741862_1038 (size=12516)
2024-11-27T03:36:48,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741862_1038 (size=12516)
2024-11-27T03:36:48,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/3effc307fed444a38a0c4eda258698be
2024-11-27T03:36:48,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/3effc307fed444a38a0c4eda258698be as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/3effc307fed444a38a0c4eda258698be
2024-11-27T03:36:48,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/3effc307fed444a38a0c4eda258698be, entries=7, sequenceid=181, filesize=12.2 K
2024-11-27T03:36:48,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for c3e92841af8e3280a2906309e831412a in 25ms, sequenceid=181, compaction requested=true
2024-11-27T03:36:48,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:48,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:48,384 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:48,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:48,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:48,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-27T03:36:48,386 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92980 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:48,386 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:48,386 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:48,386 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/f449010327654acfa05577a5f1b61e22, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/58e50d70d116425ea1c57fb177794118, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/3effc307fed444a38a0c4eda258698be] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=90.8 K
2024-11-27T03:36:48,386 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting f449010327654acfa05577a5f1b61e22, keycount=53, bloomtype=ROW, size=61.1 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732678592073
2024-11-27T03:36:48,387 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 58e50d70d116425ea1c57fb177794118, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732678606313
2024-11-27T03:36:48,387 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3effc307fed444a38a0c4eda258698be, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732678606338
2024-11-27T03:36:48,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/65f20f58adc14990b4f4ccb8162e5ef4 is 1080, key is row0134/info:/1732678608360/Put/seqid=0
2024-11-27T03:36:48,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741863_1039 (size=17906)
2024-11-27T03:36:48,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741863_1039 (size=17906)
2024-11-27T03:36:48,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/65f20f58adc14990b4f4ccb8162e5ef4
2024-11-27T03:36:48,399 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#78 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:48,400 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/0b02d5f49dc048559b50e64b11f3f2ca is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:48,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/65f20f58adc14990b4f4ccb8162e5ef4 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/65f20f58adc14990b4f4ccb8162e5ef4
2024-11-27T03:36:48,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741864_1040 (size=83215)
2024-11-27T03:36:48,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741864_1040 (size=83215)
2024-11-27T03:36:48,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/65f20f58adc14990b4f4ccb8162e5ef4, entries=12, sequenceid=196, filesize=17.5 K
2024-11-27T03:36:48,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for c3e92841af8e3280a2906309e831412a in 24ms, sequenceid=196, compaction requested=false
2024-11-27T03:36:48,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:48,410 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/0b02d5f49dc048559b50e64b11f3f2ca as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0b02d5f49dc048559b50e64b11f3f2ca
2024-11-27T03:36:48,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:48,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-27T03:36:48,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/57b0bb69bed54fcdb021d9a4a6122c87 is 1080, key is row0146/info:/1732678608386/Put/seqid=0
2024-11-27T03:36:48,417 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into 0b02d5f49dc048559b50e64b11f3f2ca(size=81.3 K), total size for store is 98.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:48,417 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:48,417 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678608384; duration=0sec
2024-11-27T03:36:48,418 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:48,418 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:48,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
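The "Exploring compaction algorithm has selected 3 files ... permutations with 1 in ratio" lines above come from a windowed scan over the eligible store files, where a window qualifies when no single file dwarfs the sum of the others. A simplified sketch of a ratio test of that general shape; the real ExploringCompactionPolicy additionally bounds file count and total size and uses separate peak and off-peak ratios, so this is illustrative only:

import java.util.List;

public class RatioCheckSketch {
    // A window of store-file sizes is "in ratio" when every file is at most
    // ratio * (sum of the other files). Compacting such a window rewrites
    // similarly sized inputs, which keeps write amplification down.
    static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Hypothetical, roughly equal file sizes pass the test.
        System.out.println(inRatio(List.of(61_440L, 56_320L, 51_200L), 1.2)); // true
        // One dominant file fails it, pushing the policy to other windows.
        System.out.println(inRatio(List.of(500_000L, 17_906L, 12_516L), 1.2)); // false
    }
}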
2024-11-27T03:36:48,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:48,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741865_1041 (size=17906)
2024-11-27T03:36:48,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741865_1041 (size=17906)
2024-11-27T03:36:48,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/57b0bb69bed54fcdb021d9a4a6122c87
2024-11-27T03:36:48,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/57b0bb69bed54fcdb021d9a4a6122c87 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/57b0bb69bed54fcdb021d9a4a6122c87
2024-11-27T03:36:48,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/57b0bb69bed54fcdb021d9a4a6122c87, entries=12, sequenceid=211, filesize=17.5 K
2024-11-27T03:36:48,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for c3e92841af8e3280a2906309e831412a in 26ms, sequenceid=211, compaction requested=true
2024-11-27T03:36:48,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:48,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:48,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:48,438 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:48,439 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119027 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:48,439 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:48,439 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:48,440 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0b02d5f49dc048559b50e64b11f3f2ca, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/65f20f58adc14990b4f4ccb8162e5ef4, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/57b0bb69bed54fcdb021d9a4a6122c87] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=116.2 K
2024-11-27T03:36:48,440 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0b02d5f49dc048559b50e64b11f3f2ca, keycount=72, bloomtype=ROW, size=81.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732678592073
2024-11-27T03:36:48,440 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 65f20f58adc14990b4f4ccb8162e5ef4, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732678608360
2024-11-27T03:36:48,441 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57b0bb69bed54fcdb021d9a4a6122c87, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732678608386
2024-11-27T03:36:48,452 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#80 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:48,453 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/5f2545359aed4220a9dd1edfec4fd36c is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:48,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741866_1042 (size=109197)
2024-11-27T03:36:48,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741866_1042 (size=109197)
2024-11-27T03:36:48,473 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/5f2545359aed4220a9dd1edfec4fd36c as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/5f2545359aed4220a9dd1edfec4fd36c
2024-11-27T03:36:48,479 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into 5f2545359aed4220a9dd1edfec4fd36c(size=106.6 K), total size for store is 106.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:48,479 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:48,479 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678608438; duration=0sec
2024-11-27T03:36:48,479 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:48,479 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:48,504 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-27T03:36:48,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
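The PressureAwareThroughputController lines report the average throughput each compaction achieved against the 50.00 MB/second limit, plus how many times and for how long the writer slept. A minimal sketch of that style of pacing throttle for a single writer; the accounting below is illustrative, not HBase's actual controller:

public class ThroughputThrottleSketch {
    private final double limitBytesPerSec;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    public ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Called after each chunk is written; sleeps just long enough to keep the
    // cumulative rate at or below the limit and returns the sleep in ms, the
    // quantity summed into "total slept time is N ms" above.
    public long control(long chunkBytes) throws InterruptedException {
        bytesWritten += chunkBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minAllowedSec = bytesWritten / limitBytesPerSec;
        long sleepMs = (long) ((minAllowedSec - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);
            return sleepMs;
        }
        return 0; // under the limit: "slept 0 time(s)"
    }
}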
2024-11-27T03:36:48,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:49,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:49,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:49,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:49,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:50,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:50,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:50,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:50,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:36:50,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/9095d1f8c30d4797abfb3e3586680ba5 is 1080, key is row0158/info:/1732678608414/Put/seqid=0
2024-11-27T03:36:50,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741867_1043 (size=12516)
2024-11-27T03:36:50,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741867_1043 (size=12516)
2024-11-27T03:36:50,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/9095d1f8c30d4797abfb3e3586680ba5
2024-11-27T03:36:50,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/9095d1f8c30d4797abfb3e3586680ba5 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9095d1f8c30d4797abfb3e3586680ba5
2024-11-27T03:36:50,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9095d1f8c30d4797abfb3e3586680ba5, entries=7, sequenceid=223, filesize=12.2 K
2024-11-27T03:36:50,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for c3e92841af8e3280a2906309e831412a in 24ms, sequenceid=223, compaction requested=false
2024-11-27T03:36:50,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:50,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:50,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-27T03:36:50,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/37fb06f8307d4f84b72569dd21aa6653 is 1080, key is row0165/info:/1732678610436/Put/seqid=0
2024-11-27T03:36:50,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741868_1044 (size=20078)
2024-11-27T03:36:50,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741868_1044 (size=20078)
2024-11-27T03:36:50,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/37fb06f8307d4f84b72569dd21aa6653
2024-11-27T03:36:50,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/37fb06f8307d4f84b72569dd21aa6653 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/37fb06f8307d4f84b72569dd21aa6653
2024-11-27T03:36:50,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/37fb06f8307d4f84b72569dd21aa6653, entries=14, sequenceid=240, filesize=19.6 K
2024-11-27T03:36:50,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for c3e92841af8e3280a2906309e831412a in 36ms, sequenceid=240, compaction requested=true
2024-11-27T03:36:50,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:50,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:50,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:50,496 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:50,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:50,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-11-27T03:36:50,497 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 141791 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:50,498 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:50,498 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:50,498 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/5f2545359aed4220a9dd1edfec4fd36c, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9095d1f8c30d4797abfb3e3586680ba5, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/37fb06f8307d4f84b72569dd21aa6653] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=138.5 K
2024-11-27T03:36:50,498 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f2545359aed4220a9dd1edfec4fd36c, keycount=96, bloomtype=ROW, size=106.6 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732678592073
2024-11-27T03:36:50,499 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9095d1f8c30d4797abfb3e3586680ba5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1732678608414
2024-11-27T03:36:50,499 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37fb06f8307d4f84b72569dd21aa6653, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732678610436
2024-11-27T03:36:50,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/756a0d00925b4817ae3dad36003ef1d3 is 1080, key is row0179/info:/1732678610473/Put/seqid=0
2024-11-27T03:36:50,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741869_1045 (size=19000)
2024-11-27T03:36:50,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741869_1045 (size=19000)
2024-11-27T03:36:50,514 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#84 average throughput is 30.01 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:50,515 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/72d0c1af514448dcb55e68f8aa8e927b is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741870_1046 (size=132069)
2024-11-27T03:36:50,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741870_1046 (size=132069)
2024-11-27T03:36:50,524 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/72d0c1af514448dcb55e68f8aa8e927b as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/72d0c1af514448dcb55e68f8aa8e927b
2024-11-27T03:36:50,530 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into 72d0c1af514448dcb55e68f8aa8e927b(size=129.0 K), total size for store is 129.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:50,530 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:50,530 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678610496; duration=0sec
2024-11-27T03:36:50,530 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:50,530 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:50,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:50,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:50,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/756a0d00925b4817ae3dad36003ef1d3
2024-11-27T03:36:50,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/756a0d00925b4817ae3dad36003ef1d3 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/756a0d00925b4817ae3dad36003ef1d3
2024-11-27T03:36:50,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/756a0d00925b4817ae3dad36003ef1d3, entries=13, sequenceid=256, filesize=18.6 K
2024-11-27T03:36:50,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for c3e92841af8e3280a2906309e831412a in 424ms, sequenceid=256, compaction requested=false
2024-11-27T03:36:50,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:51,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:51,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:51,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:51,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:52,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:52,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:52,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:52,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-27T03:36:52,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/d84a1e10c2884443b7f9c11b3e7e8e3c is 1080, key is row0192/info:/1732678610499/Put/seqid=0
2024-11-27T03:36:52,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741871_1047 (size=12523)
2024-11-27T03:36:52,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741871_1047 (size=12523)
2024-11-27T03:36:52,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/d84a1e10c2884443b7f9c11b3e7e8e3c
2024-11-27T03:36:52,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/d84a1e10c2884443b7f9c11b3e7e8e3c as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/d84a1e10c2884443b7f9c11b3e7e8e3c
2024-11-27T03:36:52,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/d84a1e10c2884443b7f9c11b3e7e8e3c, entries=7, sequenceid=267, filesize=12.2 K
2024-11-27T03:36:52,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for c3e92841af8e3280a2906309e831412a in 28ms, sequenceid=267, compaction requested=true
2024-11-27T03:36:52,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:52,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:52,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:52,546 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:52,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:52,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-27T03:36:52,547 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163592 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:52,548 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:52,548 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:52,548 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/72d0c1af514448dcb55e68f8aa8e927b, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/756a0d00925b4817ae3dad36003ef1d3, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/d84a1e10c2884443b7f9c11b3e7e8e3c] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=159.8 K
2024-11-27T03:36:52,548 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72d0c1af514448dcb55e68f8aa8e927b, keycount=117, bloomtype=ROW, size=129.0 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732678592073
2024-11-27T03:36:52,549 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 756a0d00925b4817ae3dad36003ef1d3, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732678610473
2024-11-27T03:36:52,549 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting d84a1e10c2884443b7f9c11b3e7e8e3c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1732678610499
2024-11-27T03:36:52,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/b56a2b295ebd43a38187d179838b399a is 1080, key is row0199/info:/1732678612519/Put/seqid=0
2024-11-27T03:36:52,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741872_1048 (size=20092)
2024-11-27T03:36:52,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741872_1048 (size=20092)
2024-11-27T03:36:52,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/b56a2b295ebd43a38187d179838b399a
2024-11-27T03:36:52,563 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#87 average throughput is 46.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:52,563 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/672a6d641b7f4322b2e3c4431ba527dd is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:52,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/b56a2b295ebd43a38187d179838b399a as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/b56a2b295ebd43a38187d179838b399a
2024-11-27T03:36:52,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741873_1049 (size=153811)
2024-11-27T03:36:52,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741873_1049 (size=153811)
2024-11-27T03:36:52,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/b56a2b295ebd43a38187d179838b399a, entries=14, sequenceid=284, filesize=19.6 K
2024-11-27T03:36:52,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for c3e92841af8e3280a2906309e831412a in 26ms, sequenceid=284, compaction requested=false
2024-11-27T03:36:52,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:52,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:52,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
2024-11-27T03:36:52,575 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/672a6d641b7f4322b2e3c4431ba527dd as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/672a6d641b7f4322b2e3c4431ba527dd
2024-11-27T03:36:52,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/9a4a5556e2f049fb97cee537f4f4b9a7 is 1080, key is row0213/info:/1732678612549/Put/seqid=0
2024-11-27T03:36:52,582 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into 672a6d641b7f4322b2e3c4431ba527dd(size=150.2 K), total size for store is 169.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:52,582 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:52,582 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678612546; duration=0sec
2024-11-27T03:36:52,582 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:52,582 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:52,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741874_1050 (size=17918)
2024-11-27T03:36:52,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741874_1050 (size=17918)
2024-11-27T03:36:52,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/9a4a5556e2f049fb97cee537f4f4b9a7
2024-11-27T03:36:52,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/9a4a5556e2f049fb97cee537f4f4b9a7 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9a4a5556e2f049fb97cee537f4f4b9a7
2024-11-27T03:36:52,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9a4a5556e2f049fb97cee537f4f4b9a7, entries=12, sequenceid=299, filesize=17.5 K
2024-11-27T03:36:52,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=0 B/0 for c3e92841af8e3280a2906309e831412a in 18ms, sequenceid=299, compaction requested=true
2024-11-27T03:36:52,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:52,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:52,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:52,593 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:52,594 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 191821 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:52,594 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:52,595 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:52,595 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/672a6d641b7f4322b2e3c4431ba527dd, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/b56a2b295ebd43a38187d179838b399a, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9a4a5556e2f049fb97cee537f4f4b9a7] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=187.3 K
2024-11-27T03:36:52,595 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 672a6d641b7f4322b2e3c4431ba527dd, keycount=137, bloomtype=ROW, size=150.2 K, encoding=NONE, compression=NONE, seqNum=267, earliestPutTs=1732678592073
2024-11-27T03:36:52,595 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting b56a2b295ebd43a38187d179838b399a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1732678612519
2024-11-27T03:36:52,596 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9a4a5556e2f049fb97cee537f4f4b9a7, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732678612549
2024-11-27T03:36:52,607 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#89 average throughput is 55.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-27T03:36:52,608 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/25d9ac7a3be54a109823bfb529232601 is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:52,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741875_1051 (size=181975)
2024-11-27T03:36:52,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741875_1051 (size=181975)
2024-11-27T03:36:52,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:52,617 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/25d9ac7a3be54a109823bfb529232601 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/25d9ac7a3be54a109823bfb529232601
2024-11-27T03:36:52,622 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into 25d9ac7a3be54a109823bfb529232601(size=177.7 K), total size for store is 177.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:52,622 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:52,622 INFO [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678612593; duration=0sec
2024-11-27T03:36:52,622 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:52,622 DEBUG [RS:0;66d3634da959:34443-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:52,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:53,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:53,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:53,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:53,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
2024-11-27T03:36:54,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:54,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:54,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:54,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-27T03:36:54,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/95910d604fe1441fa931a6f7800108d1 is 1080, key is row0225/info:/1732678614576/Put/seqid=0 2024-11-27T03:36:54,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741876_1052 (size=12523) 2024-11-27T03:36:54,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741876_1052 (size=12523) 2024-11-27T03:36:54,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/95910d604fe1441fa931a6f7800108d1 2024-11-27T03:36:54,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/95910d604fe1441fa931a6f7800108d1 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/95910d604fe1441fa931a6f7800108d1 2024-11-27T03:36:54,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/95910d604fe1441fa931a6f7800108d1, entries=7, sequenceid=311, filesize=12.2 K 2024-11-27T03:36:54,615 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for c3e92841af8e3280a2906309e831412a in 22ms, sequenceid=311, compaction requested=false 2024-11-27T03:36:54,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a: 2024-11-27T03:36:54,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34443 {}] regionserver.HRegion(8855): Flush requested on c3e92841af8e3280a2906309e831412a 2024-11-27T03:36:54,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 
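Note on the recurring "Failed invocation ... Filesystem closed" warnings above: RecoverLeaseFSUtils calls DistributedFileSystem#isFileClosed through reflection (the method is HDFS-specific, so there is no compile-time call), and once the test's DFSClient has been closed the probe throws IOException("Filesystem closed"), which reflection wraps in an InvocationTargetException whose own message is null. That is exactly the two-layer shape of the trace above. A minimal pure-Java sketch of the wrapping (the FakeFileSystem and LeaseProbeSketch names are hypothetical illustrations, not HBase code):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Toy reproduction of the WARN pattern in the log; nothing here touches HDFS.
public class LeaseProbeSketch {

  // Stand-in for DistributedFileSystem: isFileClosed throws once the client is closed.
  public static class FakeFileSystem {
    private final boolean open = false; // pretend the mini-cluster already shut down

    public boolean isFileClosed(String path) throws IOException {
      if (!open) {
        throw new IOException("Filesystem closed"); // same message DFSClient.checkOpen() logs
      }
      return true;
    }
  }

  public static void main(String[] args) throws Exception {
    // Look the method up reflectively, as the stack trace shows RecoverLeaseFSUtils doing.
    Method probe = FakeFileSystem.class.getMethod("isFileClosed", String.class);
    try {
      probe.invoke(new FakeFileSystem(), "/WALs/some-wal-file");
    } catch (InvocationTargetException e) {
      System.out.println(e);            // InvocationTargetException with a null message
      System.out.println(e.getCause()); // java.io.IOException: Filesystem closed
    }
  }
}

The warning repeats because the Close-WAL-Writer thread appears to retry lease recovery on a timer until shutdown completes, and every retry hits the same already-closed client.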
2024-11-27T03:36:54,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:54,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/e21be153db914e3da68532fbf62787d9 is 1080, key is row0232/info:/1732678614594/Put/seqid=0
2024-11-27T03:36:54,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:54,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741877_1053 (size=16839)
2024-11-27T03:36:54,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741877_1053 (size=16839)
2024-11-27T03:36:54,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/e21be153db914e3da68532fbf62787d9
2024-11-27T03:36:54,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/e21be153db914e3da68532fbf62787d9 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/e21be153db914e3da68532fbf62787d9
2024-11-27T03:36:54,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/e21be153db914e3da68532fbf62787d9, entries=11, sequenceid=325, filesize=16.4 K
2024-11-27T03:36:54,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=14.71 KB/15064 for c3e92841af8e3280a2906309e831412a in 30ms, sequenceid=325, compaction requested=true
2024-11-27T03:36:54,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:54,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c3e92841af8e3280a2906309e831412a:info, priority=-2147483648, current under compaction store size is 1
2024-11-27T03:36:54,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:54,646 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-27T03:36:54,648 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 211337 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-27T03:36:54,648 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1541): c3e92841af8e3280a2906309e831412a/info is initiating minor compaction (all files)
2024-11-27T03:36:54,648 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c3e92841af8e3280a2906309e831412a/info in TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:54,648 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/25d9ac7a3be54a109823bfb529232601, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/95910d604fe1441fa931a6f7800108d1, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/e21be153db914e3da68532fbf62787d9] into tmpdir=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp, totalSize=206.4 K
2024-11-27T03:36:54,648 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting 25d9ac7a3be54a109823bfb529232601, keycount=163, bloomtype=ROW, size=177.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1732678592073
2024-11-27T03:36:54,649 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting 95910d604fe1441fa931a6f7800108d1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732678614576
2024-11-27T03:36:54,649 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] compactions.Compactor(225): Compacting e21be153db914e3da68532fbf62787d9, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1732678614594
2024-11-27T03:36:54,662 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c3e92841af8e3280a2906309e831412a#info#compaction#92 average throughput is 46.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
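Note on the Compactor lines above: a minor compaction is conceptually a merge of already-sorted inputs. The three store files (keycount=163, 7 and 11) are read in key order and rewritten as one sorted file under .tmp before being committed into info/. A deliberately simplified Java sketch of that merge step (plain sorted string lists stand in for HFile cells; all names here are illustrative, not HBase code):

import java.util.ArrayList;
import java.util.List;

public class CompactionMergeSketch {

  // Merge two sorted lists: the core operation behind compacting sorted HFiles.
  static List<String> merge(List<String> a, List<String> b) {
    List<String> out = new ArrayList<>(a.size() + b.size());
    int i = 0, j = 0;
    while (i < a.size() && j < b.size()) {
      out.add(a.get(i).compareTo(b.get(j)) <= 0 ? a.get(i++) : b.get(j++));
    }
    while (i < a.size()) out.add(a.get(i++));
    while (j < b.size()) out.add(b.get(j++));
    return out;
  }

  public static void main(String[] args) {
    // Tiny stand-ins for the three store files (real keycounts: 163, 7, 11).
    List<String> f1 = List.of("row0062", "row0100", "row0200");
    List<String> f2 = List.of("row0225", "row0231");
    List<String> f3 = List.of("row0232", "row0242");
    System.out.println(merge(merge(f1, f2), f3)); // one sorted "output file"
  }
}

Because every input is already sorted, the rewrite is a single streaming pass; the real compactor streams cells through a scanner over the files rather than materializing lists, which is why the throughput controller can meter it as one sequential operation.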
2024-11-27T03:36:54,663 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/31547428296143428b895441f7ed7f15 is 1080, key is row0062/info:/1732678592073/Put/seqid=0
2024-11-27T03:36:54,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741878_1054 (size=201503)
2024-11-27T03:36:54,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741878_1054 (size=201503)
2024-11-27T03:36:54,670 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/31547428296143428b895441f7ed7f15 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/31547428296143428b895441f7ed7f15
2024-11-27T03:36:54,676 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c3e92841af8e3280a2906309e831412a/info of c3e92841af8e3280a2906309e831412a into 31547428296143428b895441f7ed7f15(size=196.8 K), total size for store is 196.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-27T03:36:54,676 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:54,676 INFO [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., storeName=c3e92841af8e3280a2906309e831412a/info, priority=13, startTime=1732678614646; duration=0sec
2024-11-27T03:36:54,676 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-27T03:36:54,676 DEBUG [RS:0;66d3634da959:34443-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c3e92841af8e3280a2906309e831412a:info
2024-11-27T03:36:55,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:55,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:55,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:55,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:56,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
2024-11-27T03:36:56,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
2024-11-27T03:36:56,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
2024-11-27T03:36:56,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
2024-11-27T03:36:56,644 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-11-27T03:36:56,645 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34443%2C1732678578710.1732678616644
2024-11-27T03:36:56,656 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,656 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,656 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,656 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,656 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,656 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678579335 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678616644
2024-11-27T03:36:56,658 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43059:43059),(127.0.0.1/127.0.0.1:46189:46189)]
2024-11-27T03:36:56,658 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678579335 is not closed yet, will try archiving it next time
2024-11-27T03:36:56,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741833_1009 (size=317837)
2024-11-27T03:36:56,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741833_1009 (size=317837)
2024-11-27T03:36:56,663 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB
2024-11-27T03:36:56,667 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/info/4ac03fd49fef43aeb66d350d7793816e is 186, key is TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985./info:regioninfo/1732678594893/Put/seqid=0
2024-11-27T03:36:56,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741880_1056 (size=6153)
2024-11-27T03:36:56,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741880_1056 (size=6153)
2024-11-27T03:36:56,674 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/info/4ac03fd49fef43aeb66d350d7793816e
2024-11-27T03:36:56,678 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/.tmp/info/4ac03fd49fef43aeb66d350d7793816e as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/info/4ac03fd49fef43aeb66d350d7793816e
2024-11-27T03:36:56,682 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/info/4ac03fd49fef43aeb66d350d7793816e, entries=5, sequenceid=21, filesize=6.0 K
2024-11-27T03:36:56,683 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false
2024-11-27T03:36:56,683 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-27T03:36:56,683 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c3e92841af8e3280a2906309e831412a 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-27T03:36:56,687 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/99338f4df9b94db7a18a9ce25e675f68 is 1080, key is row0243/info:/1732678614618/Put/seqid=0
2024-11-27T03:36:56,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741881_1057 (size=20092)
2024-11-27T03:36:56,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741881_1057 (size=20092)
2024-11-27T03:36:56,694 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/99338f4df9b94db7a18a9ce25e675f68
2024-11-27T03:36:56,700 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/.tmp/info/99338f4df9b94db7a18a9ce25e675f68 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/99338f4df9b94db7a18a9ce25e675f68
2024-11-27T03:36:56,704 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/99338f4df9b94db7a18a9ce25e675f68, entries=14, sequenceid=343, filesize=19.6 K
2024-11-27T03:36:56,705 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for c3e92841af8e3280a2906309e831412a in 22ms, sequenceid=343, compaction requested=false
2024-11-27T03:36:56,705 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c3e92841af8e3280a2906309e831412a:
2024-11-27T03:36:56,705 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for bff5b7c2fe3e28b8661aa63586114985:
2024-11-27T03:36:56,705 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34443%2C1732678578710.1732678616705
2024-11-27T03:36:56,710 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,710 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,710 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,710 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,710 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:56,710 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678616644 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678616705
2024-11-27T03:36:56,711 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46189:46189),(127.0.0.1/127.0.0.1:43059:43059)]
2024-11-27T03:36:56,711 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678616644 is not closed yet, will try archiving it next time
2024-11-27T03:36:56,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741879_1055 (size=731)
2024-11-27T03:36:56,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741879_1055 (size=731)
2024-11-27T03:36:56,712 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678579335 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/oldWALs/66d3634da959%2C34443%2C1732678578710.1732678579335
2024-11-27T03:36:56,712 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-11-27T03:36:56,713 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/WALs/66d3634da959,34443,1732678578710/66d3634da959%2C34443%2C1732678578710.1732678616644 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/oldWALs/66d3634da959%2C34443%2C1732678578710.1732678616644
2024-11-27T03:36:56,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-27T03:36:56,813 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
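Note on the roll-and-archive sequence above: a rolled-off WAL segment can be moved to oldWALs only once no region still has unflushed edits recorded in it. The test flushes 1588230740 and c3e92841... (bff5b7c2... had nothing to flush), rolls the WAL, and both older segments immediately become archivable. A toy Java sketch of that bookkeeping rule (an assumed simplification with illustrative names and sequence ids; real HBase tracks flushed sequence ids per region and store):

import java.util.ArrayDeque;
import java.util.Deque;

public class WalArchiveSketch {

  // A rolled-off WAL segment and the highest sequence id of the edits it holds.
  record Segment(String name, long highestSeqId) {}

  public static void main(String[] args) {
    Deque<Segment> rolledOff = new ArrayDeque<>();
    rolledOff.add(new Segment("66d3634da959%2C34443%2C1732678578710.1732678579335", 325));
    rolledOff.add(new Segment("66d3634da959%2C34443%2C1732678578710.1732678616644", 343));

    long flushedSeqId = 343; // all edits up to here are safely persisted in HFiles

    for (Segment s : rolledOff) {
      if (s.highestSeqId() <= flushedSeqId) {
        // Corresponds to the two WAL-Archive-0 "Archiving ... to oldWALs" lines above.
        System.out.println("Archiving " + s.name() + " to oldWALs");
      }
    }
  }
}

The "is not closed yet, will try archiving it next time" DEBUG lines appear to be the other half of the same rule: the roller defers archiving while the previous writer is still being closed, then archives on a later pass.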
2024-11-27T03:36:56,813 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T03:36:56,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:36:56,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:36:56,813 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-27T03:36:56,813 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-11-27T03:36:56,813 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1699877167, stopped=false
2024-11-27T03:36:56,813 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,38227,1732678578531
2024-11-27T03:36:56,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:36:56,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-11-27T03:36:56,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:56,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:56,873 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:36:56,874 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-27T03:36:56,874 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:36:56,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:56,874 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,34443,1732678578710' *****
2024-11-27T03:36:56,874 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-27T03:36:56,874 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:36:56,874 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-27T03:36:56,875 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(3091): Received CLOSE for c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(3091): Received CLOSE for bff5b7c2fe3e28b8661aa63586114985
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,34443,1732678578710
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:34443.
2024-11-27T03:36:56,875 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c3e92841af8e3280a2906309e831412a, disabling compactions & flushes
2024-11-27T03:36:56,875 DEBUG [RS:0;66d3634da959:34443 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-27T03:36:56,875 DEBUG [RS:0;66d3634da959:34443 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:56,875 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:56,875 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-27T03:36:56,875 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a. after waiting 0 ms
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-27T03:36:56,875 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-27T03:36:56,875 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-27T03:36:56,876 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close
2024-11-27T03:36:56,876 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c3e92841af8e3280a2906309e831412a=TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a., bff5b7c2fe3e28b8661aa63586114985=TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.}
2024-11-27T03:36:56,876 DEBUG [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bff5b7c2fe3e28b8661aa63586114985, c3e92841af8e3280a2906309e831412a
2024-11-27T03:36:56,876 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-27T03:36:56,876 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-27T03:36:56,876 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-27T03:36:56,876 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-27T03:36:56,876 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-27T03:36:56,876 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5->hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823-top, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/a4364375f84747989044a933d7f8a168, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/75b820e5accd4a828a8bf94971fa1d27, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/f449010327654acfa05577a5f1b61e22, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0e4a04b311d04fd89205a8ab8c33eeb9, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/58e50d70d116425ea1c57fb177794118, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0b02d5f49dc048559b50e64b11f3f2ca, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/3effc307fed444a38a0c4eda258698be, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/65f20f58adc14990b4f4ccb8162e5ef4, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/5f2545359aed4220a9dd1edfec4fd36c, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/57b0bb69bed54fcdb021d9a4a6122c87, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9095d1f8c30d4797abfb3e3586680ba5, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/72d0c1af514448dcb55e68f8aa8e927b, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/37fb06f8307d4f84b72569dd21aa6653, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/756a0d00925b4817ae3dad36003ef1d3, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/672a6d641b7f4322b2e3c4431ba527dd, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/d84a1e10c2884443b7f9c11b3e7e8e3c, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/b56a2b295ebd43a38187d179838b399a, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/25d9ac7a3be54a109823bfb529232601, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9a4a5556e2f049fb97cee537f4f4b9a7, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/95910d604fe1441fa931a6f7800108d1, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/e21be153db914e3da68532fbf62787d9] to archive
2024-11-27T03:36:56,877 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-27T03:36:56,879 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:56,880 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-d41cc11c412c48939289631143116878
2024-11-27T03:36:56,880 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1
2024-11-27T03:36:56,881 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-27T03:36:56,881 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-27T03:36:56,881 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
Waiting for close lock at 1732678616876
Running coprocessor pre-close hooks at 1732678616876
Disabling compacts and flushes for region at 1732678616876
Disabling writes for close at 1732678616876
Writing region close event to WAL at 1732678616877 (+1 ms)
Running coprocessor post-close hooks at 1732678616881 (+4 ms)
Closed at 1732678616881
2024-11-27T03:36:56,881 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/a4364375f84747989044a933d7f8a168 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/a4364375f84747989044a933d7f8a168
2024-11-27T03:36:56,881 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-27T03:36:56,882 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/TestLogRolling-testLogRolling=df698933c24cf07640381536ae02d6b5-51eefa78f2994b429e98aa86ab1f5334
2024-11-27T03:36:56,883 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/75b820e5accd4a828a8bf94971fa1d27 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/75b820e5accd4a828a8bf94971fa1d27
2024-11-27T03:36:56,884 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/f449010327654acfa05577a5f1b61e22 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/f449010327654acfa05577a5f1b61e22
2024-11-27T03:36:56,885 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0e4a04b311d04fd89205a8ab8c33eeb9 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0e4a04b311d04fd89205a8ab8c33eeb9
2024-11-27T03:36:56,886 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/58e50d70d116425ea1c57fb177794118 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/58e50d70d116425ea1c57fb177794118
2024-11-27T03:36:56,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0b02d5f49dc048559b50e64b11f3f2ca to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/0b02d5f49dc048559b50e64b11f3f2ca
2024-11-27T03:36:56,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/3effc307fed444a38a0c4eda258698be to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/3effc307fed444a38a0c4eda258698be
2024-11-27T03:36:56,889 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/65f20f58adc14990b4f4ccb8162e5ef4 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/65f20f58adc14990b4f4ccb8162e5ef4
2024-11-27T03:36:56,890 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/5f2545359aed4220a9dd1edfec4fd36c to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/5f2545359aed4220a9dd1edfec4fd36c
2024-11-27T03:36:56,891 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/57b0bb69bed54fcdb021d9a4a6122c87 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/57b0bb69bed54fcdb021d9a4a6122c87
2024-11-27T03:36:56,892 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9095d1f8c30d4797abfb3e3586680ba5 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9095d1f8c30d4797abfb3e3586680ba5
2024-11-27T03:36:56,893 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/72d0c1af514448dcb55e68f8aa8e927b to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/72d0c1af514448dcb55e68f8aa8e927b
2024-11-27T03:36:56,894 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/37fb06f8307d4f84b72569dd21aa6653 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/37fb06f8307d4f84b72569dd21aa6653
2024-11-27T03:36:56,896 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/756a0d00925b4817ae3dad36003ef1d3 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/756a0d00925b4817ae3dad36003ef1d3
2024-11-27T03:36:56,897 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/672a6d641b7f4322b2e3c4431ba527dd to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/672a6d641b7f4322b2e3c4431ba527dd
2024-11-27T03:36:56,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/d84a1e10c2884443b7f9c11b3e7e8e3c to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/d84a1e10c2884443b7f9c11b3e7e8e3c
2024-11-27T03:36:56,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/b56a2b295ebd43a38187d179838b399a to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/b56a2b295ebd43a38187d179838b399a
2024-11-27T03:36:56,900 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/25d9ac7a3be54a109823bfb529232601 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/25d9ac7a3be54a109823bfb529232601
2024-11-27T03:36:56,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9a4a5556e2f049fb97cee537f4f4b9a7 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/9a4a5556e2f049fb97cee537f4f4b9a7
2024-11-27T03:36:56,902 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/95910d604fe1441fa931a6f7800108d1 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/95910d604fe1441fa931a6f7800108d1
2024-11-27T03:36:56,903 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/e21be153db914e3da68532fbf62787d9 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/info/e21be153db914e3da68532fbf62787d9
2024-11-27T03:36:56,904 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=66d3634da959:38227 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-11-27T03:36:56,904 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a4364375f84747989044a933d7f8a168=42984, 75b820e5accd4a828a8bf94971fa1d27=12516, f449010327654acfa05577a5f1b61e22=62558, 0e4a04b311d04fd89205a8ab8c33eeb9=16828, 58e50d70d116425ea1c57fb177794118=17906, 0b02d5f49dc048559b50e64b11f3f2ca=83215, 3effc307fed444a38a0c4eda258698be=12516, 65f20f58adc14990b4f4ccb8162e5ef4=17906, 5f2545359aed4220a9dd1edfec4fd36c=109197, 57b0bb69bed54fcdb021d9a4a6122c87=17906, 9095d1f8c30d4797abfb3e3586680ba5=12516, 72d0c1af514448dcb55e68f8aa8e927b=132069, 37fb06f8307d4f84b72569dd21aa6653=20078, 756a0d00925b4817ae3dad36003ef1d3=19000, 672a6d641b7f4322b2e3c4431ba527dd=153811, d84a1e10c2884443b7f9c11b3e7e8e3c=12523, b56a2b295ebd43a38187d179838b399a=20092, 25d9ac7a3be54a109823bfb529232601=181975, 9a4a5556e2f049fb97cee537f4f4b9a7=17918, 95910d604fe1441fa931a6f7800108d1=12523, e21be153db914e3da68532fbf62787d9=16839]
2024-11-27T03:36:56,908 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/c3e92841af8e3280a2906309e831412a/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130
2024-11-27T03:36:56,909 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:56,909 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c3e92841af8e3280a2906309e831412a:
Waiting for close lock at 1732678616875
Running coprocessor pre-close hooks at 1732678616875
Disabling compacts and flushes for region at 1732678616875
Disabling writes for close at 1732678616875
Writing region close event to WAL at 1732678616905 (+30 ms)
Running coprocessor post-close hooks at 1732678616908 (+3 ms)
Closed at 1732678616908
2024-11-27T03:36:56,909 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732678594178.c3e92841af8e3280a2906309e831412a.
2024-11-27T03:36:56,909 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bff5b7c2fe3e28b8661aa63586114985, disabling compactions & flushes
2024-11-27T03:36:56,909 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.
2024-11-27T03:36:56,909 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.
2024-11-27T03:36:56,909 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985. after waiting 0 ms
2024-11-27T03:36:56,909 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.
2024-11-27T03:36:56,909 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5->hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/df698933c24cf07640381536ae02d6b5/info/2c4d9126f708411d9a7ad5ef88ca4823-bottom] to archive
2024-11-27T03:36:56,910 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-27T03:36:56,912 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5 to hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/archive/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/info/2c4d9126f708411d9a7ad5ef88ca4823.df698933c24cf07640381536ae02d6b5
2024-11-27T03:36:56,912 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.-1 {}] regionserver.HStore(2414): Failed to report archival of files: []
2024-11-27T03:36:56,916 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/data/default/TestLogRolling-testLogRolling/bff5b7c2fe3e28b8661aa63586114985/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130
2024-11-27T03:36:56,916 INFO [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.
2024-11-27T03:36:56,916 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bff5b7c2fe3e28b8661aa63586114985:
Waiting for close lock at 1732678616909
Running coprocessor pre-close hooks at 1732678616909
Disabling compacts and flushes for region at 1732678616909
Disabling writes for close at 1732678616909
Writing region close event to WAL at 1732678616912 (+3 ms)
Running coprocessor post-close hooks at 1732678616916 (+4 ms)
Closed at 1732678616916
2024-11-27T03:36:56,916 DEBUG [RS_CLOSE_REGION-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732678594178.bff5b7c2fe3e28b8661aa63586114985.
2024-11-27T03:36:57,076 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,34443,1732678578710; all regions closed.
2024-11-27T03:36:57,076 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,077 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,077 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,077 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,077 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741834_1010 (size=8107)
2024-11-27T03:36:57,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741834_1010 (size=8107)
2024-11-27T03:36:57,081 DEBUG [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/oldWALs
2024-11-27T03:36:57,081 INFO [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C34443%2C1732678578710.meta:.meta(num 1732678579702)
2024-11-27T03:36:57,081 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,081 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,081 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,082 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,082 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:36:57,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741882_1058 (size=780)
2024-11-27T03:36:57,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741882_1058 (size=780)
2024-11-27T03:36:57,085 DEBUG [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/oldWALs
2024-11-27T03:36:57,085 INFO [RS:0;66d3634da959:34443 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C34443%2C1732678578710:(num 1732678616705)
2024-11-27T03:36:57,085 DEBUG [RS:0;66d3634da959:34443 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-27T03:36:57,085 INFO [RS:0;66d3634da959:34443 {}] regionserver.LeaseManager(133): Closed leases
2024-11-27T03:36:57,085 INFO [RS:0;66d3634da959:34443 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-27T03:36:57,086 INFO [RS:0;66d3634da959:34443 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-27T03:36:57,086 INFO [RS:0;66d3634da959:34443 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:36:57,086 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-27T03:36:57,086 INFO [RS:0;66d3634da959:34443 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34443
2024-11-27T03:36:57,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,34443,1732678578710
2024-11-27T03:36:57,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-27T03:36:57,097 INFO [RS:0;66d3634da959:34443 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-27T03:36:57,108 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,34443,1732678578710]
2024-11-27T03:36:57,118 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,34443,1732678578710 already deleted, retry=false
2024-11-27T03:36:57,118 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,34443,1732678578710 expired; onlineServers=0
2024-11-27T03:36:57,118 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,38227,1732678578531' *****
2024-11-27T03:36:57,118 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-27T03:36:57,118 INFO [M:0;66d3634da959:38227 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-27T03:36:57,118 INFO [M:0;66d3634da959:38227 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-27T03:36:57,118 DEBUG [M:0;66d3634da959:38227 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-27T03:36:57,119 DEBUG [M:0;66d3634da959:38227 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-27T03:36:57,119 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-27T03:36:57,119 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678579077 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678579077,5,FailOnTimeoutGroup]
2024-11-27T03:36:57,119 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678579077 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678579077,5,FailOnTimeoutGroup]
2024-11-27T03:36:57,119 INFO [M:0;66d3634da959:38227 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-27T03:36:57,119 INFO [M:0;66d3634da959:38227 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:36:57,119 DEBUG [M:0;66d3634da959:38227 {}] master.HMaster(1795): Stopping service threads
2024-11-27T03:36:57,119 INFO [M:0;66d3634da959:38227 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-27T03:36:57,119 INFO [M:0;66d3634da959:38227 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:36:57,119 INFO [M:0;66d3634da959:38227 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-27T03:36:57,119 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-27T03:36:57,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-27T03:36:57,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:57,129 DEBUG [M:0;66d3634da959:38227 {}] zookeeper.ZKUtil(347): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-27T03:36:57,129 WARN [M:0;66d3634da959:38227 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-27T03:36:57,129 INFO [M:0;66d3634da959:38227 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/.lastflushedseqids
2024-11-27T03:36:57,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741883_1059 (size=228)
2024-11-27T03:36:57,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741883_1059 (size=228)
2024-11-27T03:36:57,135 INFO [M:0;66d3634da959:38227 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-27T03:36:57,135 INFO [M:0;66d3634da959:38227 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-27T03:36:57,135 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:36:57,135 INFO [M:0;66d3634da959:38227 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:57,135 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:57,135 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:36:57,135 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:57,135 INFO [M:0;66d3634da959:38227 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.36 KB
2024-11-27T03:36:57,153 DEBUG [M:0;66d3634da959:38227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20911f13cd274ed1bfb63caf085aff20 is 82, key is hbase:meta,,1/info:regioninfo/1732678579726/Put/seqid=0
2024-11-27T03:36:57,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741884_1060 (size=5672)
2024-11-27T03:36:57,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741884_1060 (size=5672)
2024-11-27T03:36:57,158 INFO [M:0;66d3634da959:38227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20911f13cd274ed1bfb63caf085aff20
2024-11-27T03:36:57,176 DEBUG [M:0;66d3634da959:38227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/097b06077ace4a8c8e33372b6362b34d is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732678580234/Put/seqid=0
2024-11-27T03:36:57,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741885_1061 (size=7089)
2024-11-27T03:36:57,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741885_1061 (size=7089)
2024-11-27T03:36:57,181 INFO [M:0;66d3634da959:38227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/097b06077ace4a8c8e33372b6362b34d
2024-11-27T03:36:57,185 INFO [M:0;66d3634da959:38227 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 097b06077ace4a8c8e33372b6362b34d
2024-11-27T03:36:57,198 DEBUG [M:0;66d3634da959:38227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb7dbc31379e44199b3e14424083be9e is 69, key is 66d3634da959,34443,1732678578710/rs:state/1732678579186/Put/seqid=0
2024-11-27T03:36:57,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741886_1062 (size=5156)
2024-11-27T03:36:57,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741886_1062 (size=5156)
2024-11-27T03:36:57,205 INFO [M:0;66d3634da959:38227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb7dbc31379e44199b3e14424083be9e
2024-11-27T03:36:57,205 INFO [regionserver/66d3634da959:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-27T03:36:57,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:36:57,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34443-0x1017a5cecf70001, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:36:57,208 INFO [RS:0;66d3634da959:34443 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-27T03:36:57,208 INFO [RS:0;66d3634da959:34443 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,34443,1732678578710; zookeeper connection closed.
2024-11-27T03:36:57,208 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7666266d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7666266d
2024-11-27T03:36:57,208 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-27T03:36:57,228 DEBUG [M:0;66d3634da959:38227 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f514bfd6e6c4dba86c3488380aca69b is 52, key is load_balancer_on/state:d/1732678579852/Put/seqid=0
2024-11-27T03:36:57,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741887_1063 (size=5056)
2024-11-27T03:36:57,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741887_1063 (size=5056)
2024-11-27T03:36:57,233 INFO [M:0;66d3634da959:38227 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f514bfd6e6c4dba86c3488380aca69b
2024-11-27T03:36:57,238 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/20911f13cd274ed1bfb63caf085aff20 as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20911f13cd274ed1bfb63caf085aff20
2024-11-27T03:36:57,243 INFO [M:0;66d3634da959:38227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/20911f13cd274ed1bfb63caf085aff20, entries=8, sequenceid=125, filesize=5.5 K
2024-11-27T03:36:57,244 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/097b06077ace4a8c8e33372b6362b34d as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/097b06077ace4a8c8e33372b6362b34d
2024-11-27T03:36:57,249 INFO [M:0;66d3634da959:38227 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 097b06077ace4a8c8e33372b6362b34d
2024-11-27T03:36:57,249 INFO [M:0;66d3634da959:38227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/097b06077ace4a8c8e33372b6362b34d, entries=13, sequenceid=125, filesize=6.9 K
2024-11-27T03:36:57,250 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb7dbc31379e44199b3e14424083be9e as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb7dbc31379e44199b3e14424083be9e
2024-11-27T03:36:57,255 INFO [M:0;66d3634da959:38227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb7dbc31379e44199b3e14424083be9e, entries=1, sequenceid=125, filesize=5.0 K
2024-11-27T03:36:57,256 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4f514bfd6e6c4dba86c3488380aca69b as hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f514bfd6e6c4dba86c3488380aca69b
2024-11-27T03:36:57,261 INFO [M:0;66d3634da959:38227 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44999/user/jenkins/test-data/9ce582f4-3092-cb61-9cdf-ca2c630e1a42/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4f514bfd6e6c4dba86c3488380aca69b, entries=1, sequenceid=125, filesize=4.9 K
2024-11-27T03:36:57,262 INFO [M:0;66d3634da959:38227 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=125, compaction requested=false
2024-11-27T03:36:57,264 INFO [M:0;66d3634da959:38227 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:57,264 DEBUG [M:0;66d3634da959:38227 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678617135Disabling compacts and flushes for region at 1732678617135Disabling writes for close at 1732678617135Obtaining lock to block concurrent updates at 1732678617135Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678617135Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64816, getOffHeapSize=0, getCellsCount=148 at 1732678617135Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678617136 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678617136Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678617152 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678617152Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678617161 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678617175 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678617176 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678617185 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678617198 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678617198Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678617209 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678617227 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678617227Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b825a77: reopening flushed file at 1732678617237 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@661e7f07: reopening flushed file at 1732678617243 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48ee2747: reopening flushed file at 1732678617249 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55797f48: reopening flushed file at 1732678617255 (+6 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.30 KB/64816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=125, compaction requested=false at 1732678617262 (+7 ms)Writing region close event to WAL at 1732678617264 (+2 ms)Closed at 1732678617264 2024-11-27T03:36:57,265 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:57,265 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:57,265 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:57,265 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:57,265 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:36:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43267 is added to blk_1073741830_1006 (size=61320) 2024-11-27T03:36:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35741 is added to blk_1073741830_1006 (size=61320) 2024-11-27T03:36:57,267 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-27T03:36:57,268 INFO [M:0;66d3634da959:38227 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-27T03:36:57,268 INFO [M:0;66d3634da959:38227 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38227 2024-11-27T03:36:57,268 INFO [M:0;66d3634da959:38227 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:36:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:36:57,376 INFO [M:0;66d3634da959:38227 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-27T03:36:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38227-0x1017a5cecf70000, quorum=127.0.0.1:56638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-27T03:36:57,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2998c4ad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:57,380 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9caeb33{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:36:57,381 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:36:57,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc4994c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:36:57,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@216b0c63{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir/,STOPPED} 2024-11-27T03:36:57,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:36:57,384 WARN [BP-216229593-172.17.0.3-1732678576154 heartbeating to localhost/127.0.0.1:44999 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:36:57,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:36:57,384 WARN [BP-216229593-172.17.0.3-1732678576154 heartbeating to localhost/127.0.0.1:44999 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-216229593-172.17.0.3-1732678576154 (Datanode Uuid 404add00-c613-4013-8a55-fbb048b0c417) service to localhost/127.0.0.1:44999 2024-11-27T03:36:57,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data3/current/BP-216229593-172.17.0.3-1732678576154 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:57,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data4/current/BP-216229593-172.17.0.3-1732678576154 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:57,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:36:57,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c9ba3de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:57,388 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70d55230{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:36:57,388 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:36:57,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3523e770{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:36:57,389 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31e425dc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir/,STOPPED} 2024-11-27T03:36:57,390 WARN [BP-216229593-172.17.0.3-1732678576154 heartbeating to localhost/127.0.0.1:44999 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-27T03:36:57,390 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-27T03:36:57,390 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-27T03:36:57,390 WARN [BP-216229593-172.17.0.3-1732678576154 heartbeating to localhost/127.0.0.1:44999 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-216229593-172.17.0.3-1732678576154 (Datanode Uuid dbba5afb-11a3-4dbd-b8ec-f6de5bafee9d) service to localhost/127.0.0.1:44999 2024-11-27T03:36:57,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data1/current/BP-216229593-172.17.0.3-1732678576154 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:57,391 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/cluster_3574b597-7676-37a4-7e3b-f2e7d247dabf/data/data2/current/BP-216229593-172.17.0.3-1732678576154 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-27T03:36:57,392 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-27T03:36:57,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@132e4184{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:36:57,398 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@53298b3d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-27T03:36:57,399 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-27T03:36:57,399 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7305dd28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-27T03:36:57,399 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@622d58de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir/,STOPPED} 2024-11-27T03:36:57,405 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-27T03:36:57,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:57,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:57,433 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-27T03:36:57,440 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 209) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44999 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.6@localhost:44999 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44999 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44999 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44999 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44999 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44999 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44999 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=521 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=47 (was 61), ProcessCount=11 (was 11), AvailableMemoryMB=7754 (was 7816) 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=521, MaxFileDescriptor=1048576, SystemLoadAverage=47, ProcessCount=11, AvailableMemoryMB=7754 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.log.dir so I do NOT create it in target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5696d9bb-7b37-6de8-6d15-3613f4af0b14/hadoop.tmp.dir so I do NOT create it in target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7, deleteOnExit=true 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/test.cache.data in system properties and HBase conf 2024-11-27T03:36:57,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.tmp.dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-27T03:36:57,449 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-27T03:36:57,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:36:57,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-27T03:36:57,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/nfs.dump.dir in system properties and HBase conf 2024-11-27T03:36:57,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/java.io.tmpdir in system properties and HBase conf 2024-11-27T03:36:57,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-27T03:36:57,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-27T03:36:57,450 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-27T03:36:57,463 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:36:57,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:57,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:36:57,858 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:57,861 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:36:57,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:36:57,862 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:36:57,862 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:36:57,863 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:57,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@259cffcc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:36:57,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3910812a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:36:57,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a86d190{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/java.io.tmpdir/jetty-localhost-41179-hadoop-hdfs-3_4_1-tests_jar-_-any-4611249499481559078/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-27T03:36:57,955 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a47ab0e{HTTP/1.1, (http/1.1)}{localhost:41179} 2024-11-27T03:36:57,955 INFO [Time-limited test {}] server.Server(415): Started @298999ms 2024-11-27T03:36:57,965 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-27T03:36:58,196 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:58,198 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:36:58,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:36:58,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:36:58,199 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-27T03:36:58,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23266789{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:36:58,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54008d53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:36:58,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@115f614b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/java.io.tmpdir/jetty-localhost-41369-hadoop-hdfs-3_4_1-tests_jar-_-any-12853716310717049474/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:58,291 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d45b9b7{HTTP/1.1, (http/1.1)}{localhost:41369} 2024-11-27T03:36:58,291 INFO [Time-limited test {}] server.Server(415): Started @299335ms 2024-11-27T03:36:58,291 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:36:58,315 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-27T03:36:58,317 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-27T03:36:58,318 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-27T03:36:58,318 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-27T03:36:58,318 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-27T03:36:58,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36507c53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir/,AVAILABLE} 2024-11-27T03:36:58,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fa9d1ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-27T03:36:58,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2eb38aaf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/java.io.tmpdir/jetty-localhost-36989-hadoop-hdfs-3_4_1-tests_jar-_-any-632532589485112165/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-27T03:36:58,411 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59532081{HTTP/1.1, (http/1.1)}{localhost:36989} 2024-11-27T03:36:58,411 INFO [Time-limited test {}] server.Server(415): Started @299455ms 2024-11-27T03:36:58,412 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-27T03:36:58,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:58,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:58,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:58,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:59,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:59,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:59,474 WARN [Thread-2518 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data1/current/BP-1957594165-172.17.0.3-1732678617468/current, will proceed with Du for space computation calculation,
2024-11-27T03:36:59,474 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data2/current/BP-1957594165-172.17.0.3-1732678617468/current, will proceed with Du for space computation calculation,
2024-11-27T03:36:59,493 WARN [Thread-2482 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:36:59,495 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x553274b314f9c59a with lease ID 0x527c8cfcbfe341da: Processing first storage report for DS-cb6b32e8-7014-4841-8375-cf5bc008f815 from datanode DatanodeRegistration(127.0.0.1:45065, datanodeUuid=61d1eb3d-1e65-44b4-b5f1-54e389a8b373, infoPort=38879, infoSecurePort=0, ipcPort=46233, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468)
2024-11-27T03:36:59,495 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x553274b314f9c59a with lease ID 0x527c8cfcbfe341da: from storage DS-cb6b32e8-7014-4841-8375-cf5bc008f815 node DatanodeRegistration(127.0.0.1:45065, datanodeUuid=61d1eb3d-1e65-44b4-b5f1-54e389a8b373, infoPort=38879, infoSecurePort=0, ipcPort=46233, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:36:59,495 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x553274b314f9c59a with lease ID 0x527c8cfcbfe341da: Processing first storage report for DS-1d797d36-eca9-4e89-b1f5-21753661e6d5 from datanode DatanodeRegistration(127.0.0.1:45065, datanodeUuid=61d1eb3d-1e65-44b4-b5f1-54e389a8b373, infoPort=38879, infoSecurePort=0, ipcPort=46233, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468)
2024-11-27T03:36:59,495 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x553274b314f9c59a with lease ID 0x527c8cfcbfe341da: from storage DS-1d797d36-eca9-4e89-b1f5-21753661e6d5 node DatanodeRegistration(127.0.0.1:45065, datanodeUuid=61d1eb3d-1e65-44b4-b5f1-54e389a8b373, infoPort=38879, infoSecurePort=0, ipcPort=46233, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:36:59,598 WARN [Thread-2529 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data3/current/BP-1957594165-172.17.0.3-1732678617468/current, will proceed with Du for space computation calculation,
2024-11-27T03:36:59,598 WARN [Thread-2530 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data4/current/BP-1957594165-172.17.0.3-1732678617468/current, will proceed with Du for space computation calculation,
2024-11-27T03:36:59,620 WARN [Thread-2505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-27T03:36:59,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:59,622 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6e2511a01ec1e98 with lease ID 0x527c8cfcbfe341db: Processing first storage report for DS-4c102d87-1a28-4f13-8976-df50677f71da from datanode DatanodeRegistration(127.0.0.1:43577, datanodeUuid=0cebbe7c-0b91-403d-85ce-ace55edae811, infoPort=46501, infoSecurePort=0, ipcPort=38959, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468)
2024-11-27T03:36:59,622 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6e2511a01ec1e98 with lease ID 0x527c8cfcbfe341db: from storage DS-4c102d87-1a28-4f13-8976-df50677f71da node DatanodeRegistration(127.0.0.1:43577, datanodeUuid=0cebbe7c-0b91-403d-85ce-ace55edae811, infoPort=46501, infoSecurePort=0, ipcPort=38959, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:36:59,622 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb6e2511a01ec1e98 with lease ID 0x527c8cfcbfe341db: Processing first storage report for DS-a13fb237-a6be-460c-8411-4a3272cad3d9 from datanode DatanodeRegistration(127.0.0.1:43577, datanodeUuid=0cebbe7c-0b91-403d-85ce-ace55edae811, infoPort=46501, infoSecurePort=0, ipcPort=38959, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468)
2024-11-27T03:36:59,622 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb6e2511a01ec1e98 with lease ID 0x527c8cfcbfe341db: from storage DS-a13fb237-a6be-460c-8411-4a3272cad3d9 node DatanodeRegistration(127.0.0.1:43577, datanodeUuid=0cebbe7c-0b91-403d-85ce-ace55edae811, infoPort=46501, infoSecurePort=0, ipcPort=38959, storageInfo=lv=-57;cid=testClusterID;nsid=169987553;c=1732678617468), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-27T03:36:59,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:36:59,642 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1
2024-11-27T03:36:59,644 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/zookeeper_0, clientPort=57719, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-27T03:36:59,645 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57719
2024-11-27T03:36:59,645 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,646 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:36:59,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741825_1001 (size=7)
2024-11-27T03:36:59,654 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e with version=8
2024-11-27T03:36:59,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45395/user/jenkins/test-data/c167bc0e-e1cf-ea46-0ab1-8d2c6afe61ba/hbase-staging
2024-11-27T03:36:59,656 INFO [Time-limited test {}] client.ConnectionUtils(128): master/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:36:59,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:59,656 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:59,657 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:36:59,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:59,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:36:59,657 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-27T03:36:59,657 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:36:59,657 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38221
2024-11-27T03:36:59,659 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38221 connecting to ZooKeeper ensemble=127.0.0.1:57719
2024-11-27T03:36:59,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:382210x0, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:36:59,715 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38221-0x1017a5d8da70000 connected
2024-11-27T03:36:59,803 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:36:59,809 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e, hbase.cluster.distributed=false
2024-11-27T03:36:59,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:36:59,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38221
2024-11-27T03:36:59,813 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38221
2024-11-27T03:36:59,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38221
2024-11-27T03:36:59,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38221
2024-11-27T03:36:59,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38221
2024-11-27T03:36:59,832 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/66d3634da959:0 server-side Connection retries=45
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-27T03:36:59,832 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-27T03:36:59,833 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34383
2024-11-27T03:36:59,834 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34383 connecting to ZooKeeper ensemble=127.0.0.1:57719
2024-11-27T03:36:59,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,835 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,844 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343830x0, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-27T03:36:59,845 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34383-0x1017a5d8da70001 connected
2024-11-27T03:36:59,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-27T03:36:59,845 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-27T03:36:59,846 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-27T03:36:59,846 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-27T03:36:59,847 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-27T03:36:59,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34383
2024-11-27T03:36:59,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34383
2024-11-27T03:36:59,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34383
2024-11-27T03:36:59,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34383
2024-11-27T03:36:59,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34383
2024-11-27T03:36:59,862 DEBUG [M:0;66d3634da959:38221 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;66d3634da959:38221
2024-11-27T03:36:59,863 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/66d3634da959,38221,1732678619656
2024-11-27T03:36:59,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:59,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:59,873 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/66d3634da959,38221,1732678619656
2024-11-27T03:36:59,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-27T03:36:59,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:59,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:59,884 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-27T03:36:59,884 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/66d3634da959,38221,1732678619656 from backup master directory
2024-11-27T03:36:59,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:59,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/66d3634da959,38221,1732678619656
2024-11-27T03:36:59,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-27T03:36:59,894 WARN [master/66d3634da959:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-27T03:36:59,894 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=66d3634da959,38221,1732678619656
2024-11-27T03:36:59,899 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/hbase.id] with ID: dbbdf274-9acf-4265-9717-49f9046ddca4
2024-11-27T03:36:59,899 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/.tmp/hbase.id
2024-11-27T03:36:59,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:36:59,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741826_1002 (size=42)
2024-11-27T03:36:59,907 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/.tmp/hbase.id]:[hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/hbase.id]
2024-11-27T03:36:59,916 INFO [master/66d3634da959:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-27T03:36:59,916 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-27T03:36:59,918 INFO [master/66d3634da959:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
2024-11-27T03:36:59,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:59,928 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:36:59,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:36:59,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741827_1003 (size=196)
2024-11-27T03:36:59,935 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-27T03:36:59,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-27T03:36:59,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-27T03:36:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:36:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741828_1004 (size=1189)
2024-11-27T03:36:59,947 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store
2024-11-27T03:36:59,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:36:59,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741829_1005 (size=34)
2024-11-27T03:36:59,955 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:59,955 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:36:59,955 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:59,955 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:59,955 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:36:59,955 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:59,955 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:36:59,955 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678619955Disabling compacts and flushes for region at 1732678619955Disabling writes for close at 1732678619955Writing region close event to WAL at 1732678619955Closed at 1732678619955
2024-11-27T03:36:59,956 WARN [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/.initializing
2024-11-27T03:36:59,956 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/WALs/66d3634da959,38221,1732678619656
2024-11-27T03:36:59,959 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C38221%2C1732678619656, suffix=, logDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/WALs/66d3634da959,38221,1732678619656, archiveDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/oldWALs, maxLogs=10
2024-11-27T03:36:59,959 INFO [master/66d3634da959:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C38221%2C1732678619656.1732678619959
2024-11-27T03:36:59,965 INFO [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/WALs/66d3634da959,38221,1732678619656/66d3634da959%2C38221%2C1732678619656.1732678619959
2024-11-27T03:36:59,968 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38879:38879),(127.0.0.1/127.0.0.1:46501:46501)]
2024-11-27T03:36:59,969 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-27T03:36:59,969 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-27T03:36:59,969 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,969 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-27T03:36:59,972 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:59,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-27T03:36:59,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-27T03:36:59,974 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:59,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:36:59,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-27T03:36:59,975 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:59,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:36:59,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-27T03:36:59,976 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-27T03:36:59,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-27T03:36:59,977 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,977 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,978 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,979 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,979 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,979 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-27T03:36:59,980 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-27T03:36:59,982 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-27T03:36:59,983 INFO [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=802814, jitterRate=0.020830824971199036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-27T03:36:59,984 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732678619970Initializing all the Stores at 1732678619970Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678619971 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678619971Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678619971Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678619971Cleaning up temporary data from old regions at 1732678619979 (+8 ms)Region opened successfully at 1732678619984 (+5 ms)
2024-11-27T03:36:59,984 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-27T03:36:59,987 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d709bca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0
2024-11-27T03:36:59,989 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-27T03:36:59,989 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-27T03:36:59,989 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-27T03:36:59,989 INFO [master/66d3634da959:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-27T03:36:59,990 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-27T03:36:59,990 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-27T03:36:59,990 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-27T03:36:59,992 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-27T03:36:59,993 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-27T03:37:00,002 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-27T03:37:00,002 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-27T03:37:00,003 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-27T03:37:00,013 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-27T03:37:00,013 INFO [master/66d3634da959:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-27T03:37:00,015 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-27T03:37:00,023 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-27T03:37:00,025 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-27T03:37:00,034 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-27T03:37:00,039 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-27T03:37:00,052 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-27T03:37:00,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:37:00,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-27T03:37:00,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:00,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:00,067 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=66d3634da959,38221,1732678619656, sessionid=0x1017a5d8da70000, setting cluster-up flag (Was=false)
2024-11-27T03:37:00,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:00,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:00,118 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-27T03:37:00,119 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,38221,1732678619656
2024-11-27T03:37:00,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:00,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:00,171 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-27T03:37:00,175 DEBUG [master/66d3634da959:0:becomeActiveMaster {}]
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=66d3634da959,38221,1732678619656 2024-11-27T03:37:00,177 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-27T03:37:00,178 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-27T03:37:00,178 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-27T03:37:00,178 INFO [master/66d3634da959:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-27T03:37:00,179 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 66d3634da959,38221,1732678619656 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/66d3634da959:0, corePoolSize=5, maxPoolSize=5 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/66d3634da959:0, corePoolSize=10, maxPoolSize=10 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/66d3634da959:0, corePoolSize=2, maxPoolSize=2 2024-11-27T03:37:00,180 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/66d3634da959:0, corePoolSize=1, 
maxPoolSize=1 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732678650181 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,181 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-27T03:37:00,181 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-27T03:37:00,181 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-27T03:37:00,182 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678620182,5,FailOnTimeoutGroup] 2024-11-27T03:37:00,182 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678620182,5,FailOnTimeoutGroup] 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,182 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,182 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,182 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-27T03:37:00,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741831_1007 (size=1321) 2024-11-27T03:37:00,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741831_1007 (size=1321) 2024-11-27T03:37:00,188 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-27T03:37:00,189 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e 2024-11-27T03:37:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741832_1008 (size=32) 2024-11-27T03:37:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741832_1008 (size=32) 2024-11-27T03:37:00,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:37:00,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T03:37:00,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T03:37:00,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-27T03:37:00,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-27T03:37:00,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T03:37:00,200 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T03:37:00,200 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T03:37:00,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T03:37:00,201 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-27T03:37:00,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740 2024-11-27T03:37:00,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740 2024-11-27T03:37:00,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-27T03:37:00,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-27T03:37:00,204 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
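Aside: a minimal sketch of the fallback arithmetic behind the FlushLargeStoresPolicy record above, assuming (as the logged "16.0 M" figure implies for hbase:meta's four families) a 64 MB region memstore flush size; the class name and values are illustrative only, not read from a live cluster.

    // Illustrative only: reproduces the flushSize / #families fallback that
    // FlushLargeStoresPolicy logs when
    // hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the
    // table descriptor.
    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            long memstoreFlushSize = 64L * 1024 * 1024; // assumed: implied by the 16.0 M fallback
            int familyCount = 4;                        // hbase:meta: info, ns, rep_barrier, table
            long lowerBound = memstoreFlushSize / familyCount;
            System.out.println(lowerBound);             // 16777216, the flushSizeLowerBound logged below
        }
    }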
2024-11-27T03:37:00,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-27T03:37:00,206 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-27T03:37:00,206 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809950, jitterRate=0.029904797673225403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T03:37:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732678620195Initializing all the Stores at 1732678620196 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678620196Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678620196Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678620196Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678620196Cleaning up temporary data from old regions at 1732678620203 (+7 ms)Region opened successfully at 1732678620207 (+4 ms) 2024-11-27T03:37:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-27T03:37:00,207 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-27T03:37:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-27T03:37:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T03:37:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T03:37:00,207 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:37:00,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678620207Disabling compacts and flushes for region at 1732678620207Disabling writes for close at 1732678620207Writing region close 
event to WAL at 1732678620207Closed at 1732678620207 2024-11-27T03:37:00,208 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-27T03:37:00,208 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-27T03:37:00,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-27T03:37:00,209 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-27T03:37:00,210 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-27T03:37:00,254 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(746): ClusterId : dbbdf274-9acf-4265-9717-49f9046ddca4 2024-11-27T03:37:00,254 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-27T03:37:00,268 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-27T03:37:00,268 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-27T03:37:00,278 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-27T03:37:00,279 DEBUG [RS:0;66d3634da959:34383 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35af343a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=66d3634da959/172.17.0.3:0 2024-11-27T03:37:00,295 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;66d3634da959:34383 2024-11-27T03:37:00,295 INFO [RS:0;66d3634da959:34383 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-27T03:37:00,295 INFO [RS:0;66d3634da959:34383 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-27T03:37:00,295 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(832): About to register with Master. 
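Aside: the records above show the region server reporting to the master against quorum 127.0.0.1:57719 with base znode /hbase; a hedged client-side sketch of connecting with those same settings follows. The host, port, and znode are this test's values, not defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Values taken from the ZKWatcher records above; assumption: a
            // client on the same host points at the same quorum and parent znode.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "57719");
            conf.set("zookeeper.znode.parent", "/hbase");
            try (Connection connection = ConnectionFactory.createConnection(conf)) {
                System.out.println("connected: " + !connection.isClosed());
            }
        }
    }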
2024-11-27T03:37:00,296 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(2659): reportForDuty to master=66d3634da959,38221,1732678619656 with port=34383, startcode=1732678619831 2024-11-27T03:37:00,296 DEBUG [RS:0;66d3634da959:34383 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-27T03:37:00,298 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56897, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-27T03:37:00,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38221 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 66d3634da959,34383,1732678619831 2024-11-27T03:37:00,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38221 {}] master.ServerManager(517): Registering regionserver=66d3634da959,34383,1732678619831 2024-11-27T03:37:00,300 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e 2024-11-27T03:37:00,300 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44465 2024-11-27T03:37:00,300 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-27T03:37:00,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:37:00,308 DEBUG [RS:0;66d3634da959:34383 {}] zookeeper.ZKUtil(111): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/66d3634da959,34383,1732678619831 2024-11-27T03:37:00,308 WARN [RS:0;66d3634da959:34383 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-27T03:37:00,308 INFO [RS:0;66d3634da959:34383 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:37:00,308 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/66d3634da959,34383,1732678619831 2024-11-27T03:37:00,308 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [66d3634da959,34383,1732678619831] 2024-11-27T03:37:00,311 INFO [RS:0;66d3634da959:34383 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-27T03:37:00,312 INFO [RS:0;66d3634da959:34383 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-27T03:37:00,313 INFO [RS:0;66d3634da959:34383 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-27T03:37:00,313 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
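Aside: the throughput controller record above reports a 100 MB/s higher and 50 MB/s lower compaction bound; a sketch of setting those bounds programmatically, with the key names assumed from PressureAwareCompactionThroughputController's configuration (verify against your HBase version).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names; they correspond to the "higher bound" /
            // "lower bound" figures in the log record above.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
        }
    }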
2024-11-27T03:37:00,313 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-27T03:37:00,314 INFO [RS:0;66d3634da959:34383 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-27T03:37:00,314 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/66d3634da959:0, corePoolSize=2, maxPoolSize=2 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/66d3634da959:0, corePoolSize=1, maxPoolSize=1 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3 2024-11-27T03:37:00,314 DEBUG [RS:0;66d3634da959:34383 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/66d3634da959:0, corePoolSize=3, maxPoolSize=3 2024-11-27T03:37:00,315 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
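Aside: each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" record corresponds to a ScheduledChore registered with a ChoreService; below is a minimal sketch of that pattern, assuming the (name, stopper, period) constructor from the public HBase API. The chore name and body are illustrative.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("example");
            // Period mirrors the CompactionChecker entry above (1000 ms).
            service.scheduleChore(new ScheduledChore("ExampleChore", stopper, 1000) {
                @Override protected void chore() {
                    // periodic work runs here once per period
                }
            });
            service.shutdown();
        }
    }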
2024-11-27T03:37:00,315 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,315 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,315 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,315 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,315 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,34383,1732678619831-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T03:37:00,333 INFO [RS:0;66d3634da959:34383 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-27T03:37:00,334 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,34383,1732678619831-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,334 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,334 INFO [RS:0;66d3634da959:34383 {}] regionserver.Replication(171): 66d3634da959,34383,1732678619831 started 2024-11-27T03:37:00,345 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,345 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1482): Serving as 66d3634da959,34383,1732678619831, RpcServer on 66d3634da959/172.17.0.3:34383, sessionid=0x1017a5d8da70001 2024-11-27T03:37:00,345 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-27T03:37:00,345 DEBUG [RS:0;66d3634da959:34383 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 66d3634da959,34383,1732678619831 2024-11-27T03:37:00,345 DEBUG [RS:0;66d3634da959:34383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,34383,1732678619831' 2024-11-27T03:37:00,345 DEBUG [RS:0;66d3634da959:34383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-27T03:37:00,346 DEBUG [RS:0;66d3634da959:34383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-27T03:37:00,347 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-27T03:37:00,347 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-27T03:37:00,347 DEBUG [RS:0;66d3634da959:34383 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 66d3634da959,34383,1732678619831 2024-11-27T03:37:00,347 DEBUG [RS:0;66d3634da959:34383 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '66d3634da959,34383,1732678619831' 2024-11-27T03:37:00,347 DEBUG [RS:0;66d3634da959:34383 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-27T03:37:00,347 DEBUG 
[RS:0;66d3634da959:34383 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-27T03:37:00,348 DEBUG [RS:0;66d3634da959:34383 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-27T03:37:00,348 INFO [RS:0;66d3634da959:34383 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-27T03:37:00,348 INFO [RS:0;66d3634da959:34383 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-27T03:37:00,360 WARN [66d3634da959:38221 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-27T03:37:00,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:37:00,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:37:00,451 INFO [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C34383%2C1732678619831, suffix=, logDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/66d3634da959,34383,1732678619831, archiveDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs, maxLogs=32 2024-11-27T03:37:00,453 INFO [RS:0;66d3634da959:34383 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34383%2C1732678619831.1732678620452 2024-11-27T03:37:00,464 INFO [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/66d3634da959,34383,1732678619831/66d3634da959%2C34383%2C1732678619831.1732678620452 2024-11-27T03:37:00,466 DEBUG [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38879:38879),(127.0.0.1/127.0.0.1:46501:46501)] 2024-11-27T03:37:00,611 DEBUG [66d3634da959:38221 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-27T03:37:00,612 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=66d3634da959,34383,1732678619831 2024-11-27T03:37:00,616 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,34383,1732678619831, state=OPENING 2024-11-27T03:37:00,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:37:00,623 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-27T03:37:00,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.meta.1732678429952.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-27T03:37:00,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:37:00,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:37:00,634 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-27T03:37:00,634 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:37:00,634 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:37:00,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,34383,1732678619831}] 2024-11-27T03:37:00,788 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-27T03:37:00,792 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48523, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-27T03:37:00,800 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-27T03:37:00,800 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:37:00,802 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=66d3634da959%2C34383%2C1732678619831.meta, suffix=.meta, 
logDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/66d3634da959,34383,1732678619831, archiveDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs, maxLogs=32 2024-11-27T03:37:00,803 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 66d3634da959%2C34383%2C1732678619831.meta.1732678620803.meta 2024-11-27T03:37:00,808 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/66d3634da959,34383,1732678619831/66d3634da959%2C34383%2C1732678619831.meta.1732678620803.meta 2024-11-27T03:37:00,812 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46501:46501),(127.0.0.1/127.0.0.1:38879:38879)] 2024-11-27T03:37:00,816 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-27T03:37:00,816 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-27T03:37:00,816 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-27T03:37:00,817 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
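The InvocationTargetException stack traces above come from HBase's lease-recovery helper probing HDFS via reflection: RecoverLeaseFSUtils looks up DistributedFileSystem.isFileClosed(Path) at runtime (the method is not part of the generic FileSystem contract), and because the test's DFSClient has already been shut down, the underlying call throws java.io.IOException: Filesystem closed, which reflection wraps. A minimal sketch of that probe (the WAL path below is a placeholder, not one of the paths from this run):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path wal = new Path("/tmp/example.wal"); // placeholder path
    try {
      // Only DistributedFileSystem exposes isFileClosed(Path), hence the
      // reflective lookup instead of a direct call on FileSystem.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      boolean closed = (Boolean) isFileClosed.invoke(fs, wal);
      System.out.println("closed=" + closed);
    } catch (NoSuchMethodException e) {
      // Not HDFS; lease recovery has to fall back to polling.
    } catch (InvocationTargetException e) {
      // e.getCause() is the real failure, e.g. IOException: Filesystem closed,
      // exactly what the "Failed invocation" warnings above report.
      System.out.println("probe failed: " + e.getCause());
    }
  }
}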
2024-11-27T03:37:00,817 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-27T03:37:00,817 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-27T03:37:00,817 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-27T03:37:00,817 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-27T03:37:00,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-27T03:37:00,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-27T03:37:00,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-27T03:37:00,820 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-27T03:37:00,820 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-27T03:37:00,820 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-27T03:37:00,820 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-27T03:37:00,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-27T03:37:00,822 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-27T03:37:00,822 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-27T03:37:00,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
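The HStore lines above show each hbase:meta column family (info, ns, rep_barrier, table) opened with ROW_INDEX_V1 block encoding and no compression; the region-open journal below adds ROWCOL bloom filters, in-memory caching, and 8 KB blocks. For reference, a hedged sketch of declaring a comparable family through the public client API ("example" is a made-up table name, not part of this run):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamily {
  // Builds a family with settings comparable to the Store=1588230740/info line.
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setCompressionType(Compression.Algorithm.NONE)
        .setMaxVersions(3)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
  }
}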
2024-11-27T03:37:00,822 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-27T03:37:00,823 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740 2024-11-27T03:37:00,824 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740 2024-11-27T03:37:00,825 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-27T03:37:00,825 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-27T03:37:00,825 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-27T03:37:00,826 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-27T03:37:00,827 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=761608, jitterRate=-0.031566321849823}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-27T03:37:00,827 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-27T03:37:00,827 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732678620817Writing region info on filesystem at 1732678620817Initializing all the Stores at 1732678620818 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678620818Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678620818Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732678620818Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732678620818Cleaning up temporary data from old regions at 1732678620825 (+7 ms)Running coprocessor post-open hooks at 1732678620827 (+2 ms)Region opened successfully at 1732678620827 2024-11-27T03:37:00,828 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732678620788 2024-11-27T03:37:00,830 DEBUG [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-27T03:37:00,830 INFO [RS_OPEN_META-regionserver/66d3634da959:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-27T03:37:00,831 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=66d3634da959,34383,1732678619831 2024-11-27T03:37:00,831 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 66d3634da959,34383,1732678619831, state=OPEN 2024-11-27T03:37:00,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T03:37:00,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-27T03:37:00,916 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=66d3634da959,34383,1732678619831 2024-11-27T03:37:00,916 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:37:00,916 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-27T03:37:00,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-27T03:37:00,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=66d3634da959,34383,1732678619831 in 282 msec 2024-11-27T03:37:00,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-27T03:37:00,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 712 msec 2024-11-27T03:37:00,925 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-27T03:37:00,925 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-27T03:37:00,927 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-27T03:37:00,928 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,34383,1732678619831, seqNum=-1] 2024-11-27T03:37:00,928 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T03:37:00,929 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45811, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T03:37:00,936 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 757 msec 2024-11-27T03:37:00,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732678620937, completionTime=-1 2024-11-27T03:37:00,937 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-27T03:37:00,937 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-27T03:37:00,939 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-27T03:37:00,939 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732678680939 2024-11-27T03:37:00,939 INFO [master/66d3634da959:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732678740939 2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38221,1732678619656-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38221,1732678619656-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38221,1732678619656-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-66d3634da959:38221, period=300000, unit=MILLISECONDS is enabled. 
2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,940 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-27T03:37:00,942 DEBUG [master/66d3634da959:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.051sec 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38221,1732678619656-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-27T03:37:00,946 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38221,1732678619656-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-27T03:37:00,949 DEBUG [master/66d3634da959:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-27T03:37:00,949 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-27T03:37:00,949 INFO [master/66d3634da959:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=66d3634da959,38221,1732678619656-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
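The "Chore ScheduledChore name=... is enabled" lines above are periodic background tasks registered with the master's ChoreService. ScheduledChore and ChoreService are HBase-internal classes, so the following is an illustration of the pattern rather than a supported extension point; HeartbeatChore and DemoStopper are made-up names:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class HeartbeatChore extends ScheduledChore {
  HeartbeatChore(Stoppable stopper) {
    // name, stopper, period in ms -- mirrors "period=..., unit=MILLISECONDS"
    super("HeartbeatChore", stopper, (int) TimeUnit.SECONDS.toMillis(5));
  }

  @Override
  protected void chore() {
    System.out.println("chore fired");
  }

  // Minimal Stoppable so the chore can be cancelled cleanly.
  static final class DemoStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws InterruptedException {
    ChoreService service = new ChoreService("demo");
    DemoStopper stopper = new DemoStopper();
    service.scheduleChore(new HeartbeatChore(stopper));
    Thread.sleep(12_000); // let the chore fire a couple of times
    stopper.stop("demo over");
    service.shutdown();
  }
}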
2024-11-27T03:37:00,953 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13830cb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:37:00,953 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 66d3634da959,38221,-1 for getting cluster id 2024-11-27T03:37:00,954 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-27T03:37:00,955 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dbbdf274-9acf-4265-9717-49f9046ddca4' 2024-11-27T03:37:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-27T03:37:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dbbdf274-9acf-4265-9717-49f9046ddca4" 2024-11-27T03:37:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66f039d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:37:00,955 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [66d3634da959,38221,-1] 2024-11-27T03:37:00,956 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-27T03:37:00,956 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:37:00,956 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37758, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-27T03:37:00,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ab592d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-27T03:37:00,957 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-27T03:37:00,958 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=66d3634da959,34383,1732678619831, seqNum=-1] 2024-11-27T03:37:00,958 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-27T03:37:00,959 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-27T03:37:00,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=66d3634da959,38221,1732678619656 2024-11-27T03:37:00,961 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-27T03:37:00,963 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-27T03:37:00,963 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-27T03:37:00,965 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs, maxLogs=32 2024-11-27T03:37:00,965 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732678620965 2024-11-27T03:37:00,969 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/test.com,8080,1/test.com%2C8080%2C1.1732678620965 2024-11-27T03:37:00,970 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38879:38879),(127.0.0.1/127.0.0.1:46501:46501)] 2024-11-27T03:37:00,971 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732678620971 2024-11-27T03:37:00,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,977 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,977 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,977 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,977 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/test.com,8080,1/test.com%2C8080%2C1.1732678620965 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/test.com,8080,1/test.com%2C8080%2C1.1732678620971 2024-11-27T03:37:00,978 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38879:38879),(127.0.0.1/127.0.0.1:46501:46501)] 2024-11-27T03:37:00,978 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/test.com,8080,1/test.com%2C8080%2C1.1732678620965 is not closed yet, will try archiving it next time 2024-11-27T03:37:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741835_1011 (size=93) 2024-11-27T03:37:00,979 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741835_1011 (size=93) 2024-11-27T03:37:00,980 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:00,980 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/WALs/test.com,8080,1/test.com%2C8080%2C1.1732678620965 to hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs/test.com%2C8080%2C1.1732678620965 2024-11-27T03:37:00,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741836_1012 (size=93) 2024-11-27T03:37:00,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741836_1012 (size=93) 2024-11-27T03:37:00,983 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs 2024-11-27T03:37:00,983 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732678620971) 2024-11-27T03:37:00,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-27T03:37:00,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-27T03:37:00,983 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T03:37:00,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:37:00,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:37:00,983 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-27T03:37:00,983 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-27T03:37:00,983 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1824166202, stopped=false 2024-11-27T03:37:00,983 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=66d3634da959,38221,1732678619656 2024-11-27T03:37:01,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-27T03:37:01,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-27T03:37:01,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:37:01,002 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-27T03:37:01,002 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-27T03:37:01,002 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
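Both call stacks above bottom out in AbstractTestLogRolling.tearDown, which shuts the minicluster down through HBaseTestingUtil. A minimal sketch of that JUnit lifecycle, assuming the hbase-testing-util module (TestLogRollingSketch is a made-up name; the real test class is AbstractTestLogRolling):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class TestLogRollingSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(1); // one master, one region server, as in this log
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster(); // produces the shutdown sequence logged here
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // exercise WAL rolling against the minicluster here
  }
}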
2024-11-27T03:37:01,003 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T03:37:01,003 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:37:01,003 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:37:01,003 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-27T03:37:01,003 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '66d3634da959,34383,1732678619831' ***** 2024-11-27T03:37:01,003 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-27T03:37:01,003 INFO [RS:0;66d3634da959:34383 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-27T03:37:01,003 INFO [RS:0;66d3634da959:34383 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-27T03:37:01,003 INFO [RS:0;66d3634da959:34383 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-27T03:37:01,003 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(959): stopping server 66d3634da959,34383,1732678619831 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;66d3634da959:34383. 2024-11-27T03:37:01,004 DEBUG [RS:0;66d3634da959:34383 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-27T03:37:01,004 DEBUG [RS:0;66d3634da959:34383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
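Cluster shutdown is propagated through ZooKeeper: deleting /hbase/running fires NodeDeleted on every watcher, and each process then re-arms its watch (the "Set watcher on znode that does not yet exist" lines above). An illustrative sketch of that pattern using the plain ZooKeeper client, reusing the quorum address from this log; the callback logic is simplified:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57719", 30_000, event -> {});
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        System.out.println("cluster shutdown requested");
      }
    };
    // exists() both checks the znode and (re)arms the one-shot watch, even if
    // the znode is not there yet.
    zk.exists("/hbase/running", watcher);
  }
}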
2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-27T03:37:01,004 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-27T03:37:01,004 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-27T03:37:01,004 DEBUG [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-27T03:37:01,004 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-27T03:37:01,005 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-27T03:37:01,005 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-27T03:37:01,005 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-27T03:37:01,005 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-27T03:37:01,005 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-27T03:37:01,023 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/.tmp/ns/e801f554f9e94dbba6c10cbe59ccca58 is 43, key is default/ns:d/1732678620930/Put/seqid=0 2024-11-27T03:37:01,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741837_1013 (size=5153) 2024-11-27T03:37:01,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741837_1013 (size=5153) 2024-11-27T03:37:01,027 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/.tmp/ns/e801f554f9e94dbba6c10cbe59ccca58 2024-11-27T03:37:01,032 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/.tmp/ns/e801f554f9e94dbba6c10cbe59ccca58 as hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/ns/e801f554f9e94dbba6c10cbe59ccca58 2024-11-27T03:37:01,035 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/ns/e801f554f9e94dbba6c10cbe59ccca58, entries=2, sequenceid=6, filesize=5.0 K 2024-11-27T03:37:01,037 INFO 
[RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false 2024-11-27T03:37:01,040 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-27T03:37:01,040 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-27T03:37:01,040 INFO [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-27T03:37:01,040 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732678621004Running coprocessor pre-close hooks at 1732678621004Disabling compacts and flushes for region at 1732678621004Disabling writes for close at 1732678621005 (+1 ms)Obtaining lock to block concurrent updates at 1732678621005Preparing flush snapshotting stores in 1588230740 at 1732678621005Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732678621005Flushing stores of hbase:meta,,1.1588230740 at 1732678621006 (+1 ms)Flushing 1588230740/ns: creating writer at 1732678621006Flushing 1588230740/ns: appending metadata at 1732678621022 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732678621022Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5faae00e: reopening flushed file at 1732678621031 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1732678621037 (+6 ms)Writing region close event to WAL at 1732678621037Running coprocessor post-close hooks at 1732678621040 (+3 ms)Closed at 1732678621040 2024-11-27T03:37:01,041 DEBUG [RS_CLOSE_META-regionserver/66d3634da959:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-27T03:37:01,205 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(976): stopping server 66d3634da959,34383,1732678619831; all regions closed. 
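The flush above is driven by the region close: the ns memstore is written to a .tmp HFile, committed into the store, and only then does the region report closed. The same memstore-to-HFile path can also be requested explicitly through the public Admin API; a sketch assuming an already-configured Connection:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class FlushExample {
  static void flushMeta(Connection connection) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      // memstore -> .tmp HFile -> committed store file, as in the log above
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}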
2024-11-27T03:37:01,206 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,206 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,207 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,207 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,207 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741834_1010 (size=1152) 2024-11-27T03:37:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741834_1010 (size=1152) 2024-11-27T03:37:01,217 DEBUG [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs 2024-11-27T03:37:01,217 INFO [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C34383%2C1732678619831.meta:.meta(num 1732678620803) 2024-11-27T03:37:01,218 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,218 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,219 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-27T03:37:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741833_1009 (size=93) 2024-11-27T03:37:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741833_1009 (size=93) 2024-11-27T03:37:01,223 DEBUG [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/oldWALs 2024-11-27T03:37:01,223 INFO [RS:0;66d3634da959:34383 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 66d3634da959%2C34383%2C1732678619831:(num 1732678620452) 2024-11-27T03:37:01,223 DEBUG [RS:0;66d3634da959:34383 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-27T03:37:01,223 INFO [RS:0;66d3634da959:34383 {}] regionserver.LeaseManager(133): Closed leases 2024-11-27T03:37:01,223 INFO [RS:0;66d3634da959:34383 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:37:01,223 INFO [RS:0;66d3634da959:34383 {}] hbase.ChoreService(370): Chore service for: regionserver/66d3634da959:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-27T03:37:01,224 INFO [RS:0;66d3634da959:34383 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-27T03:37:01,224 INFO [regionserver/66d3634da959:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
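The "Moved 1 WAL file(s) to .../oldWALs" lines correspond to renaming fully-closed WALs out of the server's WALs directory into the archive directory, where cleaner chores later delete them. A deliberately simplified illustration using the Hadoop FileSystem API; this is a sketch, not HBase's actual AbstractFSWAL code path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveWalSketch {
  // Rename one closed WAL into the oldWALs directory.
  static void archive(Configuration conf, Path walFile, Path oldWALsDir)
      throws java.io.IOException {
    FileSystem fs = walFile.getFileSystem(conf);
    Path target = new Path(oldWALsDir, walFile.getName());
    if (!fs.rename(walFile, target)) {
      throw new java.io.IOException("Failed to archive " + walFile);
    }
  }
}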
2024-11-27T03:37:01,224 INFO [RS:0;66d3634da959:34383 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34383 2024-11-27T03:37:01,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/66d3634da959,34383,1732678619831 2024-11-27T03:37:01,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-27T03:37:01,234 INFO [RS:0;66d3634da959:34383 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-27T03:37:01,244 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [66d3634da959,34383,1732678619831] 2024-11-27T03:37:01,255 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/66d3634da959,34383,1732678619831 already deleted, retry=false 2024-11-27T03:37:01,255 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 66d3634da959,34383,1732678619831 expired; onlineServers=0 2024-11-27T03:37:01,255 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '66d3634da959,38221,1732678619656' ***** 2024-11-27T03:37:01,255 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-27T03:37:01,255 INFO [M:0;66d3634da959:38221 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-27T03:37:01,255 INFO [M:0;66d3634da959:38221 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-27T03:37:01,255 DEBUG [M:0;66d3634da959:38221 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-27T03:37:01,255 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-27T03:37:01,255 DEBUG [M:0;66d3634da959:38221 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-27T03:37:01,255 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678620182 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.small.0-1732678620182,5,FailOnTimeoutGroup]
2024-11-27T03:37:01,255 DEBUG [master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678620182 {}] cleaner.HFileCleaner(306): Exit Thread[master/66d3634da959:0:becomeActiveMaster-HFileCleaner.large.0-1732678620182,5,FailOnTimeoutGroup]
2024-11-27T03:37:01,255 INFO [M:0;66d3634da959:38221 {}] hbase.ChoreService(370): Chore service for: master/66d3634da959:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-27T03:37:01,255 INFO [M:0;66d3634da959:38221 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-27T03:37:01,255 DEBUG [M:0;66d3634da959:38221 {}] master.HMaster(1795): Stopping service threads
2024-11-27T03:37:01,255 INFO [M:0;66d3634da959:38221 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-27T03:37:01,256 INFO [M:0;66d3634da959:38221 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-27T03:37:01,256 INFO [M:0;66d3634da959:38221 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-27T03:37:01,256 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-27T03:37:01,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-27T03:37:01,265 DEBUG [M:0;66d3634da959:38221 {}] zookeeper.ZKUtil(347): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-27T03:37:01,265 WARN [M:0;66d3634da959:38221 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-27T03:37:01,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-27T03:37:01,266 INFO [M:0;66d3634da959:38221 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/.lastflushedseqids
2024-11-27T03:37:01,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741838_1014 (size=99)
2024-11-27T03:37:01,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741838_1014 (size=99)
2024-11-27T03:37:01,273 INFO [M:0;66d3634da959:38221 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-27T03:37:01,273 INFO [M:0;66d3634da959:38221 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-27T03:37:01,273 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-27T03:37:01,273 INFO [M:0;66d3634da959:38221 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:37:01,273 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:37:01,273 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-27T03:37:01,273 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:37:01,273 INFO [M:0;66d3634da959:38221 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-27T03:37:01,293 DEBUG [M:0;66d3634da959:38221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9246da9c80ba4e16a4d9332d0a433e29 is 82, key is hbase:meta,,1/info:regioninfo/1732678620831/Put/seqid=0
2024-11-27T03:37:01,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741839_1015 (size=5672)
2024-11-27T03:37:01,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741839_1015 (size=5672)
2024-11-27T03:37:01,297 INFO [M:0;66d3634da959:38221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9246da9c80ba4e16a4d9332d0a433e29
2024-11-27T03:37:01,316 DEBUG [M:0;66d3634da959:38221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bbe25df753d34d24a43c78208c4ccd90 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732678620935/Put/seqid=0
2024-11-27T03:37:01,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741840_1016 (size=5275)
2024-11-27T03:37:01,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741840_1016 (size=5275)
2024-11-27T03:37:01,321 INFO [M:0;66d3634da959:38221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bbe25df753d34d24a43c78208c4ccd90
2024-11-27T03:37:01,338 DEBUG [M:0;66d3634da959:38221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ef114dea5095494ba620532de9090b38 is 69, key is 66d3634da959,34383,1732678619831/rs:state/1732678620298/Put/seqid=0
2024-11-27T03:37:01,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741841_1017 (size=5156)
2024-11-27T03:37:01,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741841_1017 (size=5156)
2024-11-27T03:37:01,342 INFO [M:0;66d3634da959:38221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ef114dea5095494ba620532de9090b38
2024-11-27T03:37:01,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:37:01,344 INFO [RS:0;66d3634da959:34383 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-27T03:37:01,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34383-0x1017a5d8da70001, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:37:01,344 INFO [RS:0;66d3634da959:34383 {}] regionserver.HRegionServer(1031): Exiting; stopping=66d3634da959,34383,1732678619831; zookeeper connection closed.
2024-11-27T03:37:01,345 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5fe30a29 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5fe30a29
2024-11-27T03:37:01,345 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-27T03:37:01,360 DEBUG [M:0;66d3634da959:38221 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a32893e666943b6aac67b264152b4f1 is 52, key is load_balancer_on/state:d/1732678620962/Put/seqid=0
2024-11-27T03:37:01,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741842_1018 (size=5056)
2024-11-27T03:37:01,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741842_1018 (size=5056)
2024-11-27T03:37:01,365 INFO [M:0;66d3634da959:38221 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a32893e666943b6aac67b264152b4f1
2024-11-27T03:37:01,369 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9246da9c80ba4e16a4d9332d0a433e29 as hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9246da9c80ba4e16a4d9332d0a433e29
2024-11-27T03:37:01,373 INFO [M:0;66d3634da959:38221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9246da9c80ba4e16a4d9332d0a433e29, entries=8, sequenceid=29, filesize=5.5 K
2024-11-27T03:37:01,373 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bbe25df753d34d24a43c78208c4ccd90 as hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bbe25df753d34d24a43c78208c4ccd90
2024-11-27T03:37:01,377 INFO [M:0;66d3634da959:38221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bbe25df753d34d24a43c78208c4ccd90, entries=3, sequenceid=29, filesize=5.2 K
2024-11-27T03:37:01,378 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ef114dea5095494ba620532de9090b38 as hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ef114dea5095494ba620532de9090b38
2024-11-27T03:37:01,382 INFO [M:0;66d3634da959:38221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ef114dea5095494ba620532de9090b38, entries=1, sequenceid=29, filesize=5.0 K
2024-11-27T03:37:01,382 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5a32893e666943b6aac67b264152b4f1 as hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a32893e666943b6aac67b264152b4f1
2024-11-27T03:37:01,387 INFO [M:0;66d3634da959:38221 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44465/user/jenkins/test-data/d51b0e34-5479-a820-bc66-e9a042ce659e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5a32893e666943b6aac67b264152b4f1, entries=1, sequenceid=29, filesize=4.9 K
2024-11-27T03:37:01,388 INFO [M:0;66d3634da959:38221 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=29, compaction requested=false
2024-11-27T03:37:01,389 INFO [M:0;66d3634da959:38221 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-27T03:37:01,389 DEBUG [M:0;66d3634da959:38221 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732678621273Disabling compacts and flushes for region at 1732678621273Disabling writes for close at 1732678621273Obtaining lock to block concurrent updates at 1732678621273Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732678621273Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732678621274 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732678621275 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732678621275Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732678621293 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732678621293Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732678621301 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732678621315 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732678621315Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732678621325 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732678621337 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732678621337Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732678621346 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732678621360 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732678621360Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12d2708f: reopening flushed file at 1732678621368 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f00ca0c: reopening flushed file at 1732678621373 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72af30d5: reopening flushed file at 1732678621377 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ae0d3f: reopening flushed file at 1732678621382 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 115ms, sequenceid=29, compaction requested=false at 1732678621388 (+6 ms)Writing region close event to WAL at 1732678621389 (+1 ms)Closed at 1732678621389
2024-11-27T03:37:01,390 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:37:01,390 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:37:01,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:37:01,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:37:01,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-27T03:37:01,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43577 is added to blk_1073741830_1006 (size=10311)
2024-11-27T03:37:01,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45065 is added to blk_1073741830_1006 (size=10311)
2024-11-27T03:37:01,393 INFO [M:0;66d3634da959:38221 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-27T03:37:01,393 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-27T03:37:01,393 INFO [M:0;66d3634da959:38221 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38221
2024-11-27T03:37:01,393 INFO [M:0;66d3634da959:38221 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-27T03:37:01,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/MasterData/WALs/66d3634da959,45173,1732678428709/66d3634da959%2C45173%2C1732678428709.1732678429019
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:37:01,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,38459,1732678428873/66d3634da959%2C38459%2C1732678428873.1732678429510
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-27T03:37:01,542 INFO [M:0;66d3634da959:38221 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-27T03:37:01,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:37:01,542 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38221-0x1017a5d8da70000, quorum=127.0.0.1:57719, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-27T03:37:01,575 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2eb38aaf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:37:01,576 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59532081{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:37:01,576 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:37:01,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fa9d1ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:37:01,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@36507c53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir/,STOPPED}
2024-11-27T03:37:01,579 WARN [BP-1957594165-172.17.0.3-1732678617468 heartbeating to localhost/127.0.0.1:44465 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:37:01,579 WARN [BP-1957594165-172.17.0.3-1732678617468 heartbeating to localhost/127.0.0.1:44465 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1957594165-172.17.0.3-1732678617468 (Datanode Uuid 0cebbe7c-0b91-403d-85ce-ace55edae811) service to localhost/127.0.0.1:44465
2024-11-27T03:37:01,579 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T03:37:01,579 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:37:01,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data3/current/BP-1957594165-172.17.0.3-1732678617468 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:37:01,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data4/current/BP-1957594165-172.17.0.3-1732678617468 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:37:01,580 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:37:01,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@115f614b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-27T03:37:01,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d45b9b7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:37:01,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:37:01,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54008d53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:37:01,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23266789{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir/,STOPPED}
2024-11-27T03:37:01,588 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-27T03:37:01,588 WARN [BP-1957594165-172.17.0.3-1732678617468 heartbeating to localhost/127.0.0.1:44465 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-27T03:37:01,588 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-27T03:37:01,588 WARN [BP-1957594165-172.17.0.3-1732678617468 heartbeating to localhost/127.0.0.1:44465 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1957594165-172.17.0.3-1732678617468 (Datanode Uuid 61d1eb3d-1e65-44b4-b5f1-54e389a8b373) service to localhost/127.0.0.1:44465
2024-11-27T03:37:01,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data1/current/BP-1957594165-172.17.0.3-1732678617468 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:37:01,589 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/cluster_c5bacda6-2418-eefe-a252-07ec022a08b7/data/data2/current/BP-1957594165-172.17.0.3-1732678617468 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-27T03:37:01,589 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-27T03:37:01,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a86d190{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-27T03:37:01,594 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a47ab0e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-27T03:37:01,594 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-27T03:37:01,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3910812a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-27T03:37:01,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@259cffcc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1775b49b-d27d-0c76-fa6b-b160ffd43fc1/hadoop.log.dir/,STOPPED}
2024-11-27T03:37:01,599 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-27T03:37:01,614 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-27T03:37:01,621 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=272 (was 233)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:44465
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44465 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44465
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44465 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44465
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44465
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44465 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44465
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=539 (was 521) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=51 (was 47) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7747 (was 7754)
2024-11-27T03:37:01,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44897/user/jenkins/test-data/ea8232b1-42b5-9f4b-15b7-d1d8f26e58a4/WALs/66d3634da959,40479,1732678430168/66d3634da959%2C40479%2C1732678430168.1732678430409
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more